From 03db5d1058d3bbaa2cb7ac71b6da7782cc989848 Mon Sep 17 00:00:00 2001
From: Darren Shepherd
Date: Wed, 1 Jun 2016 01:42:22 -0700
Subject: [PATCH] Update vendor/

---
 trash.conf | 5 +-
 vendor/github.com/RackSec/srslog/.gitignore | 1 +
 vendor/github.com/RackSec/srslog/.travis.yml | 18 +
 .../RackSec/srslog/CODE_OF_CONDUCT.md | 50 +
 vendor/github.com/RackSec/srslog/LICENSE | 27 +
 vendor/github.com/RackSec/srslog/README.md | 131 +
 vendor/github.com/RackSec/srslog/constants.go | 68 +
 vendor/github.com/RackSec/srslog/dialer.go | 87 +
 vendor/github.com/RackSec/srslog/formatter.go | 48 +
 vendor/github.com/RackSec/srslog/framer.go | 24 +
 vendor/github.com/RackSec/srslog/net_conn.go | 30 +
 vendor/github.com/RackSec/srslog/srslog.go | 100 +
 .../github.com/RackSec/srslog/srslog_unix.go | 54 +
 vendor/github.com/RackSec/srslog/writer.go | 164 ++
 vendor/github.com/boltdb/bolt/.gitignore | 4 +
 vendor/github.com/boltdb/bolt/LICENSE | 20 +
 vendor/github.com/boltdb/bolt/Makefile | 18 +
 vendor/github.com/boltdb/bolt/README.md | 844 +++++++
 vendor/github.com/boltdb/bolt/appveyor.yml | 18 +
 vendor/github.com/boltdb/bolt/bolt_386.go | 7 +
 vendor/github.com/boltdb/bolt/bolt_amd64.go | 7 +
 vendor/github.com/boltdb/bolt/bolt_arm.go | 7 +
 vendor/github.com/boltdb/bolt/bolt_arm64.go | 9 +
 vendor/github.com/boltdb/bolt/bolt_linux.go | 10 +
 vendor/github.com/boltdb/bolt/bolt_openbsd.go | 27 +
 vendor/github.com/boltdb/bolt/bolt_ppc.go | 9 +
 vendor/github.com/boltdb/bolt/bolt_ppc64.go | 9 +
 vendor/github.com/boltdb/bolt/bolt_ppc64le.go | 9 +
 vendor/github.com/boltdb/bolt/bolt_s390x.go | 9 +
 vendor/github.com/boltdb/bolt/bolt_unix.go | 89 +
 .../boltdb/bolt/bolt_unix_solaris.go | 90 +
 vendor/github.com/boltdb/bolt/bolt_windows.go | 144 ++
 .../github.com/boltdb/bolt/boltsync_unix.go | 8 +
 vendor/github.com/boltdb/bolt/bucket.go | 748 ++++++
 vendor/github.com/boltdb/bolt/cursor.go | 400 +++
 vendor/github.com/boltdb/bolt/db.go | 993 ++++++++
 vendor/github.com/boltdb/bolt/doc.go | 44 +
 vendor/github.com/boltdb/bolt/errors.go | 70 +
 vendor/github.com/boltdb/bolt/freelist.go | 242 ++
 vendor/github.com/boltdb/bolt/node.go | 599 +++++
 vendor/github.com/boltdb/bolt/page.go | 172 ++
 vendor/github.com/boltdb/bolt/tx.go | 666 +++++
 .../cloudfoundry/gosigar/.gitignore | 1 +
 .../cloudfoundry/gosigar/.travis.yml | 8 +
 .../github.com/cloudfoundry/gosigar/LICENSE | 201 ++
 vendor/github.com/cloudfoundry/gosigar/NOTICE | 9 +
 .../github.com/cloudfoundry/gosigar/README.md | 22 +
 .../cloudfoundry/gosigar/Vagrantfile | 25 +
 .../cloudfoundry/gosigar/concrete_sigar.go | 69 +
 .../cloudfoundry/gosigar/sigar_darwin.go | 467 ++++
 .../cloudfoundry/gosigar/sigar_format.go | 126 +
 .../cloudfoundry/gosigar/sigar_interface.go | 141 ++
 .../cloudfoundry/gosigar/sigar_linux.go | 386 +++
 .../cloudfoundry/gosigar/sigar_unix.go | 26 +
 .../cloudfoundry/gosigar/sigar_util.go | 22 +
 .../cloudfoundry/gosigar/sigar_windows.go | 100 +
 .../github.com/coreos/go-systemd/.travis.yml | 8 +
 vendor/github.com/coreos/go-systemd/LICENSE | 191 ++
 vendor/github.com/coreos/go-systemd/README.md | 54 +
 .../coreos/go-systemd/activation/files.go | 52 +
 .../coreos/go-systemd/activation/listeners.go | 62 +
 .../go-systemd/activation/packetconns.go | 37 +
 .../coreos/go-systemd/daemon/sdnotify.go | 31 +
 .../github.com/coreos/go-systemd/dbus/dbus.go | 187 ++
 .../coreos/go-systemd/dbus/methods.go | 410 ++++
 .../coreos/go-systemd/dbus/properties.go | 218 ++
 .../github.com/coreos/go-systemd/dbus/set.go | 47 +
 .../coreos/go-systemd/dbus/subscription.go | 250 ++
 .../go-systemd/dbus/subscription_set.go | 57 +
 vendor/github.com/coreos/go-systemd/test | 76 +
 .../github.com/coreos/go-systemd/util/util.go | 33 +
 vendor/github.com/docker/containerd/Makefile | 27 +-
 .../containerd/api/grpc/server/server.go | 468 ++++
 .../api/grpc/server/server_linux.go | 59 +
 .../api/grpc/server/server_solaris.go | 14 +
 .../containerd/api/grpc/types/api.pb.go | 1438 +++++++++++
 .../containerd/api/grpc/types/api.proto | 311 +++
 .../docker/containerd/archutils/epoll.go | 19 +
 .../containerd/archutils/epoll_arm64.go | 70 +
 .../docker/containerd/runtime/container.go | 593 +++++
 .../containerd/runtime/container_linux.go | 134 +
 .../containerd/runtime/container_solaris.go | 19 +
 .../containerd/runtime/direct_process.go | 283 +++
 .../docker/containerd/runtime/process.go | 340 +++
 .../docker/containerd/runtime/runtime.go | 99 +
 .../docker/containerd/runtime/stats.go | 77 +
 .../docker/containerd/specs/spec_linux.go | 9 +
 .../docker/containerd/specs/spec_solaris.go | 8 +
 .../containerd/supervisor/add_process.go | 43 +
 .../containerd/supervisor/checkpoint.go | 35 +
 .../docker/containerd/supervisor/create.go | 67 +
 .../containerd/supervisor/create_solaris.go | 8 +
 .../docker/containerd/supervisor/delete.go | 42 +
 .../docker/containerd/supervisor/errors.go | 24 +
 .../docker/containerd/supervisor/exit.go | 81 +
 .../containerd/supervisor/get_containers.go | 28 +
 .../docker/containerd/supervisor/machine.go | 25 +
 .../containerd/supervisor/machine_solaris.go | 15 +
 .../docker/containerd/supervisor/metrics.go | 31 +
 .../containerd/supervisor/monitor_linux.go | 129 +
 .../containerd/supervisor/monitor_solaris.go | 38 +
 .../docker/containerd/supervisor/oom.go | 22 +
 .../docker/containerd/supervisor/signal.go | 27 +
 .../docker/containerd/supervisor/sort.go | 27 +
 .../docker/containerd/supervisor/stats.go | 33 +
 .../containerd/supervisor/supervisor.go | 385 +++
 .../docker/containerd/supervisor/task.go | 33 +
 .../docker/containerd/supervisor/types.go | 12 +
 .../docker/containerd/supervisor/update.go | 92 +
 .../docker/containerd/supervisor/worker.go | 74 +
 .../github.com/docker/containerd/version.go | 11 +
 .../docker/distribution/manifest/doc.go | 1 +
 .../manifest/manifestlist/manifestlist.go | 155 ++
 .../manifest/schema1/config_builder.go | 281 +++
 .../distribution/manifest/schema1/manifest.go | 184 ++
 .../manifest/schema1/reference_builder.go | 98 +
 .../distribution/manifest/schema1/sign.go | 68 +
 .../distribution/manifest/schema1/verify.go | 32 +
 .../distribution/manifest/schema2/builder.go | 77 +
 .../distribution/manifest/schema2/manifest.go | 125 +
 .../docker/distribution/manifest/versioned.go | 12 +
 vendor/github.com/docker/docker/api/README.md | 5 +
 .../docker/docker/api/client/attach.go | 109 +
 .../docker/docker/api/client/build.go | 321 +++
 .../docker/docker/api/client/cli.go | 215 ++
 .../docker/docker/api/client/client.go | 5 +
 .../docker/docker/api/client/commit.go | 85 +
 .../github.com/docker/docker/api/client/cp.go | 298 +++
 .../docker/docker/api/client/create.go | 164 ++
 .../docker/docker/api/client/diff.go | 49 +
 .../docker/docker/api/client/events.go | 146 ++
 .../docker/docker/api/client/exec.go | 166 ++
 .../docker/docker/api/client/export.go | 42 +
 .../docker/api/client/formatter/custom.go | 242 ++
 .../docker/api/client/formatter/formatter.go | 255 ++
 .../docker/docker/api/client/hijack.go | 56 +
 .../docker/docker/api/client/history.go | 76 +
 .../docker/docker/api/client/images.go | 81 +
 .../docker/docker/api/client/import.go | 82 +
 .../docker/docker/api/client/info.go | 155 ++
 .../docker/docker/api/client/inspect.go | 127 +
 .../docker/api/client/inspect/inspector.go | 119 +
 .../api/client/inspect/inspector_go14.go | 40 +
 .../api/client/inspect/inspector_go15.go | 29 +
 .../docker/docker/api/client/kill.go | 35 +
 .../docker/docker/api/client/load.go | 50 +
 .../docker/docker/api/client/login.go | 177 ++
 .../docker/docker/api/client/logout.go | 41 +
 .../docker/docker/api/client/logs.go | 65 +
 .../docker/docker/api/client/network.go | 392 +++
 .../docker/docker/api/client/pause.go | 34 +
 .../docker/docker/api/client/port.go | 61 +
 .../github.com/docker/docker/api/client/ps.go | 89 +
 .../docker/docker/api/client/pull.go | 73 +
 .../docker/docker/api/client/push.go | 72 +
 .../docker/docker/api/client/rename.go | 34 +
 .../docker/docker/api/client/restart.go | 35 +
 .../github.com/docker/docker/api/client/rm.go | 56 +
 .../docker/docker/api/client/rmi.go | 59 +
 .../docker/docker/api/client/run.go | 274 +++
 .../docker/docker/api/client/save.go | 42 +
 .../docker/docker/api/client/search.go | 93 +
 .../docker/docker/api/client/start.go | 157 ++
 .../docker/docker/api/client/stats.go | 208 ++
 .../docker/docker/api/client/stats_helpers.go | 219 ++
 .../docker/docker/api/client/stop.go | 37 +
 .../docker/docker/api/client/tag.go | 46 +
 .../docker/docker/api/client/top.go | 41 +
 .../docker/docker/api/client/unpause.go | 34 +
 .../docker/docker/api/client/update.go | 117 +
 .../docker/docker/api/client/utils.go | 202 ++
 .../docker/docker/api/client/version.go | 95 +
 .../docker/docker/api/client/volume.go | 177 ++
 .../docker/docker/api/client/wait.go | 37 +
 vendor/github.com/docker/docker/api/common.go | 146 ++
 .../docker/api/server/httputils/errors.go | 70 +
 .../docker/api/server/httputils/form.go | 73 +
 .../docker/api/server/httputils/httputils.go | 107 +
 .../docker/docker/api/server/middleware.go | 41 +
 .../api/server/middleware/authorization.go | 42 +
 .../docker/api/server/middleware/cors.go | 33 +
 .../docker/api/server/middleware/debug.go | 56 +
 .../api/server/middleware/middleware.go | 7 +
 .../api/server/middleware/user_agent.go | 37 +
 .../docker/api/server/middleware/version.go | 45 +
 .../docker/docker/api/server/profiler.go | 40 +
 .../docker/api/server/router/build/backend.go | 20 +
 .../docker/api/server/router/build/build.go | 29 +
 .../api/server/router/build/build_routes.go | 213 ++
 .../api/server/router/container/backend.go | 71 +
 .../api/server/router/container/container.go | 63 +
 .../router/container/container_routes.go | 531 ++++
 .../api/server/router/container/copy.go | 112 +
 .../api/server/router/container/exec.go | 134 +
 .../api/server/router/container/inspect.go | 21 +
 .../docker/api/server/router/image/backend.go | 44 +
 .../docker/api/server/router/image/image.go | 44 +
 .../api/server/router/image/image_routes.go | 383 +++
 .../docker/docker/api/server/router/local.go | 61 +
 .../docker/docker/api/server/router/router.go | 19 +
 .../api/server/router/system/backend.go | 18 +
 .../docker/api/server/router/system/system.go | 33 +
 .../api/server/router/system/system_routes.go | 125 +
 .../api/server/router/volume/backend.go | 15 +
 .../docker/api/server/router/volume/volume.go | 35 +
 .../api/server/router/volume/volume_routes.go | 66 +
 .../docker/api/server/router_swapper.go | 30 +
 .../docker/docker/api/server/server.go | 195 ++
 .../docker/api/types/backend/backend.go | 69 +
 .../docker/docker/builder/context.go | 20 +-
 .../docker/docker/builder/dockerfile/bflag.go | 176 ++
 .../docker/builder/dockerfile/builder.go | 326 +++
 .../builder/dockerfile/command/command.go | 42 +
 .../docker/builder/dockerfile/dispatchers.go | 639 +++++
 .../docker/builder/dockerfile/envVarTest | 112 +
 .../docker/builder/dockerfile/evaluator.go | 215 ++
 .../docker/builder/dockerfile/internals.go | 662 +++++
 .../builder/dockerfile/parser/line_parsers.go | 331 +++
 .../builder/dockerfile/parser/parser.go | 161 ++
 .../docker/builder/dockerfile/parser/utils.go | 176 ++
 .../docker/builder/dockerfile/shell_parser.go | 314 +++
 .../docker/builder/dockerfile/support.go | 16 +
 .../docker/builder/dockerfile/wordsTest | 25 +
 vendor/github.com/docker/docker/cli/cli.go | 200 ++
 vendor/github.com/docker/docker/cli/client.go | 12 +
 vendor/github.com/docker/docker/cli/common.go | 80 +
 .../cliconfig/credentials/credentials.go | 17 +
 .../cliconfig/credentials/default_store.go | 22 +
 .../credentials/default_store_darwin.go | 3 +
 .../credentials/default_store_linux.go | 3 +
 .../credentials/default_store_unsupported.go | 5 +
 .../cliconfig/credentials/file_store.go | 67 +
 .../cliconfig/credentials/native_store.go | 196 ++
 .../cliconfig/credentials/shell_command.go | 28 +
 .../docker/docker/container/archive.go | 69 +
 .../docker/docker/container/container.go | 649 +++++
 .../docker/docker/container/container_unix.go | 405 +++
 .../docker/docker/container/history.go | 30 +
 .../docker/docker/container/memory_store.go | 92 +
 .../docker/docker/container/monitor.go | 60 +
 .../docker/docker/container/mounts_unix.go | 12 +
 .../docker/docker/container/state.go | 283 +++
 .../docker/docker/container/state_unix.go | 10 +
 .../docker/docker/container/store.go | 28 +
 .../docker/docker/daemon/apparmor_default.go | 30 +
 .../daemon/apparmor_default_unsupported.go | 6 +
 .../docker/docker/daemon/archive.go | 432 ++++
 .../docker/docker/daemon/archive_unix.go | 57 +
 .../github.com/docker/docker/daemon/attach.go | 120 +
 .../docker/docker/daemon/caps/utils_unix.go | 131 +
 .../docker/docker/daemon/changes.go | 15 +
 .../github.com/docker/docker/daemon/commit.go | 233 ++
 .../github.com/docker/docker/daemon/config.go | 358 +++
 .../docker/daemon/config_experimental.go | 8 +
 .../docker/docker/daemon/config_stub.go | 8 +
 .../docker/docker/daemon/config_unix.go | 88 +
 .../daemon/container_operations_unix.go | 319 +++
 .../github.com/docker/docker/daemon/create.go | 185 ++
 .../docker/docker/daemon/create_unix.go | 76 +
 .../github.com/docker/docker/daemon/daemon.go | 1538 ++++++++++++
 .../docker/daemon/daemon_experimental.go | 9 +
 .../docker/docker/daemon/daemon_linux.go | 80 +
 .../docker/docker/daemon/daemon_stub.go | 9 +
 .../docker/docker/daemon/daemon_unix.go | 955 ++++++++
 .../docker/daemon/daemon_unsupported.go | 5 +
 .../docker/docker/daemon/debugtrap_unix.go | 21 +
 .../docker/daemon/debugtrap_unsupported.go | 7 +
 .../github.com/docker/docker/daemon/delete.go | 157 ++
 .../github.com/docker/docker/daemon/errors.go | 57 +
 .../github.com/docker/docker/daemon/events.go | 71 +
 .../docker/docker/daemon/events/events.go | 142 ++
 .../docker/docker/daemon/events/filter.go | 82 +
 .../github.com/docker/docker/daemon/exec.go | 246 ++
 .../docker/docker/daemon/exec/exec.go | 93 +
 .../docker/docker/daemon/exec_linux.go | 26 +
 .../github.com/docker/docker/daemon/export.go | 55 +
 .../docker/daemon/graphdriver/counter.go | 32 +
 .../docker/daemon/graphdriver/overlay/copy.go | 169 ++
 .../daemon/graphdriver/overlay/overlay.go | 486 ++++
 .../overlay/overlay_unsupported.go | 3 +
 .../graphdriver/register/register_overlay.go | 8 +
 .../graphdriver/register/register_vfs.go | 6 +
 .../docker/daemon/graphdriver/vfs/driver.go | 135 +
 .../docker/docker/daemon/image_delete.go | 371 +++
 .../github.com/docker/docker/daemon/images.go | 162 ++
 .../github.com/docker/docker/daemon/import.go | 109 +
 .../github.com/docker/docker/daemon/info.go | 162 ++
 .../docker/docker/daemon/inspect.go | 245 ++
 .../docker/docker/daemon/inspect_unix.go | 91 +
 .../github.com/docker/docker/daemon/kill.go | 153 ++
 .../github.com/docker/docker/daemon/links.go | 87 +
 .../docker/docker/daemon/links/links.go | 141 ++
 .../github.com/docker/docker/daemon/list.go | 515 ++++
 .../docker/docker/daemon/list_unix.go | 11 +
 .../docker/docker/daemon/logdrivers_linux.go | 8 +
 .../docker/docker/daemon/logger/context.go | 112 +
 .../docker/docker/daemon/logger/copier.go | 86 +
 .../docker/docker/daemon/logger/factory.go | 89 +
 .../daemon/logger/jsonfilelog/jsonfilelog.go | 147 ++
 .../docker/daemon/logger/jsonfilelog/read.go | 235 ++
 .../docker/docker/daemon/logger/logger.go | 87 +
 .../daemon/logger/loggerutils/log_tag.go | 46 +
 .../logger/loggerutils/rotatefilewriter.go | 124 +
 .../docker/daemon/logger/syslog/syslog.go | 247 ++
 .../logger/syslog/syslog_unsupported.go | 3 +
 .../github.com/docker/docker/daemon/logs.go | 154 ++
 .../docker/docker/daemon/monitor.go | 144 ++
 .../docker/docker/daemon/monitor_linux.go | 14 +
 .../github.com/docker/docker/daemon/mounts.go | 48 +
 .../docker/docker/daemon/network/settings.go | 22 +
 .../docker/daemon/network_operations.go | 77 +
 .../docker/docker/daemon/oci_linux.go | 686 ++++++
 .../github.com/docker/docker/daemon/pause.go | 49 +
 .../github.com/docker/docker/daemon/rename.go | 65 +
 .../github.com/docker/docker/daemon/resize.go | 40 +
 .../docker/docker/daemon/restart.go | 48 +
 .../docker/docker/daemon/seccomp_disabled.go | 12 +
 .../docker/docker/daemon/seccomp_linux.go | 46 +
 .../docker/docker/daemon/selinux_linux.go | 17 +
 .../docker/daemon/selinux_unsupported.go | 13 +
 .../github.com/docker/docker/daemon/start.go | 185 ++
 .../github.com/docker/docker/daemon/stats.go | 121 +
 .../docker/daemon/stats_collector_unix.go | 189 ++
 .../github.com/docker/docker/daemon/stop.go | 65 +
 .../docker/docker/daemon/top_unix.go | 85 +
 .../docker/docker/daemon/unpause.go | 43 +
 .../github.com/docker/docker/daemon/update.go | 100 +
 .../docker/docker/daemon/update_linux.go | 25 +
 .../docker/docker/daemon/volumes.go | 178 ++
 .../docker/docker/daemon/volumes_unix.go | 78 +
 .../github.com/docker/docker/daemon/wait.go | 17 +
 .../docker/docker/distribution/errors.go | 113 +
 .../docker/distribution/metadata/metadata.go | 77 +
 .../distribution/metadata/v1_id_service.go | 44 +
 .../metadata/v2_metadata_service.go | 137 ++
 .../docker/docker/distribution/pull.go | 205 ++
 .../docker/docker/distribution/pull_v1.go | 362 +++
 .../docker/docker/distribution/pull_v2.go | 840 +++++++
 .../docker/distribution/pull_v2_unix.go | 12 +
 .../docker/docker/distribution/push.go | 219 ++
 .../docker/docker/distribution/push_v1.go | 454 ++++
 .../docker/docker/distribution/push_v2.go | 438 ++++
 .../docker/docker/distribution/registry.go | 130 +
 .../docker/distribution/xfer/download.go | 430 ++++
 .../docker/distribution/xfer/transfer.go | 392 +++
 .../docker/docker/distribution/xfer/upload.go | 163 ++
 .../github.com/docker/docker/docker/README.md | 3 +
 .../github.com/docker/docker/docker/client.go | 33 +
 .../github.com/docker/docker/docker/common.go | 100 +
 .../github.com/docker/docker/docker/daemon.go | 417 ++++
 .../docker/docker/docker/daemon_freebsd.go | 7 +
 .../docker/docker/docker/daemon_linux.go | 13 +
 .../docker/docker/docker/daemon_none.go | 13 +
 .../docker/docker/docker/daemon_unix.go | 82 +
 .../github.com/docker/docker/docker/docker.go | 77 +
 .../github.com/docker/docker/docker/flags.go | 30 +
 .../docker/docker/listeners/listeners.go | 19 +
 .../docker/docker/listeners/listeners_unix.go | 89 +
 .../github.com/docker/docker/docker/runc.go | 12 +
 .../docker/docker/dockerversion/useragent.go | 74 +
 .../docker/dockerversion/version_lib.go | 13 +
 .../github.com/docker/docker/errors/errors.go | 41 +
 .../docker/docker/image/tarexport/load.go | 372 +++
 .../docker/docker/image/tarexport/save.go | 319 +++
 .../docker/image/tarexport/tarexport.go | 37 +
 .../docker/docker/libcontainerd/client.go | 46 +
 .../docker/libcontainerd/client_linux.go | 401 +++
 .../libcontainerd/client_liverestore_linux.go | 83 +
 .../client_shutdownrestore_linux.go | 46 +
 .../docker/docker/libcontainerd/container.go | 40 +
 .../docker/libcontainerd/container_linux.go | 209 ++
 .../libcontainerd/pausemonitor_linux.go | 31 +
 .../docker/docker/libcontainerd/process.go | 18 +
 .../docker/libcontainerd/process_linux.go | 110 +
 .../docker/libcontainerd/queue_linux.go | 29 +
 .../docker/docker/libcontainerd/remote.go | 18 +
 .../docker/libcontainerd/remote_linux.go | 290 +++
 .../docker/docker/libcontainerd/rpc_bridge.go | 48 +
 .../libcontainerd/rpc_bridge_wrapper.go | 131 +
 .../docker/docker/libcontainerd/types.go | 60 +
 .../docker/libcontainerd/types_linux.go | 47 +
 .../docker/libcontainerd/utils_linux.go | 52 +
 .../docker/docker/oci/defaults_linux.go | 210 ++
 .../docker/docker/pkg/aaparser/aaparser.go | 92 +
 .../docker/docker/pkg/authorization/api.go | 54 +
 .../docker/docker/pkg/authorization/authz.go | 165 ++
 .../docker/docker/pkg/authorization/plugin.go | 83 +
 .../docker/pkg/authorization/response.go | 203 ++
 .../docker/pkg/broadcaster/unbuffered.go | 49 +
 .../docker/pkg/filenotify/filenotify.go | 40 +
 .../docker/docker/pkg/filenotify/fsnotify.go | 18 +
 .../docker/docker/pkg/filenotify/poller.go | 204 ++
 .../docker/docker/pkg/locker/README.md | 65 +
 .../docker/docker/pkg/locker/locker.go | 112 +
 .../pkg/namesgenerator/names-generator.go | 524 ++++
 .../docker/pkg/parsers/kernel/kernel.go | 100 +
 .../docker/pkg/parsers/kernel/uname_linux.go | 19 +
 .../pkg/parsers/kernel/uname_unsupported.go | 18 +
 .../operatingsystem_freebsd.go | 18 +
 .../operatingsystem/operatingsystem_linux.go | 77 +
 .../docker/docker/pkg/parsers/parsers.go | 69 +
 .../docker/docker/pkg/pidfile/pidfile.go | 50 +
 .../pkg/platform/architecture_freebsd.go | 15 +
 .../docker/pkg/platform/architecture_linux.go | 16 +
 .../docker/docker/pkg/platform/platform.go | 23 +
 .../docker/pkg/platform/utsname_int8.go | 18 +
 .../docker/pkg/platform/utsname_uint8.go | 18 +
 .../docker/docker/pkg/pubsub/publisher.go | 111 +
 .../docker/docker/pkg/registrar/registrar.go | 127 +
 .../docker/docker/pkg/stringutils/README.md | 1 +
 .../docker/pkg/stringutils/stringutils.go | 87 +
 .../docker/docker/pkg/sysinfo/README.md | 1 +
 .../docker/docker/pkg/sysinfo/sysinfo.go | 128 +
 .../docker/pkg/sysinfo/sysinfo_freebsd.go | 7 +
 .../docker/pkg/sysinfo/sysinfo_linux.go | 246 ++
 .../docker/docker/pkg/tailfile/tailfile.go | 66 +
 .../docker/pkg/truncindex/truncindex.go | 137 ++
 .../docker/docker/pkg/useragent/README.md | 1 +
 .../docker/docker/pkg/useragent/useragent.go | 55 +
 .../docker/profiles/apparmor/apparmor.go | 115 +
 .../docker/profiles/apparmor/template.go | 46 +
 .../docker/profiles/seccomp/default.json | 1628 ++++++++++++
 .../docker/profiles/seccomp/generate.go | 32 +
 .../docker/docker/profiles/seccomp/seccomp.go | 74 +
 .../profiles/seccomp/seccomp_default.go | 1659 +++++++++++++
 .../profiles/seccomp/seccomp_unsupported.go | 10 +
 .../docker/restartmanager/restartmanager.go | 131 +
 .../docker/docker/runconfig/compare.go | 61 +
 .../docker/docker/runconfig/config.go | 71 +
 .../docker/docker/runconfig/config_unix.go | 59 +
 .../docker/docker/runconfig/errors.go | 40 +
 .../docker/docker/runconfig/hostconfig.go | 35 +
 .../docker/runconfig/hostconfig_unix.go | 89 +
 .../docker/docker/runconfig/streams.go | 109 +
 vendor/github.com/docker/docker/trash.conf | 4 +-
 .../github.com/docker/docker/utils/debug.go | 26 +
 .../docker/docker/utils/experimental.go | 9 +
 .../github.com/docker/docker/utils/names.go | 12 +
 .../docker/docker/utils/process_unix.go | 22 +
 .../github.com/docker/docker/utils/stubs.go | 9 +
 .../docker/utils/templates/templates.go | 33 +
 .../github.com/docker/docker/utils/utils.go | 87 +
 .../docker/docker/volume/drivers/adapter.go | 106 +
 .../docker/docker/volume/drivers/extpoint.go | 164 ++
 .../docker/docker/volume/drivers/proxy.go | 207 ++
 .../docker/docker/volume/local/local.go | 330 +++
 .../docker/docker/volume/local/local_unix.go | 69 +
 .../docker/docker/volume/store/errors.go | 74 +
 .../docker/docker/volume/store/store.go | 506 ++++
 .../docker/docker/volume/store/store_unix.go | 9 +
 .../github.com/docker/docker/volume/volume.go | 133 +
 .../docker/docker/volume/volume_copy.go | 28 +
 .../docker/volume/volume_propagation_linux.go | 44 +
 .../volume/volume_propagation_unsupported.go | 22 +
 .../docker/docker/volume/volume_unix.go | 186 ++
 .../docker/engine-api/types/events/events.go | 38 +
 .../engine-api/types/versions/README.md | 14 +
 .../engine-api/types/versions/v1p19/types.go | 35 +
 .../engine-api/types/versions/v1p20/types.go | 40 +
 .../docker/libtrust/CONTRIBUTING.md | 13 +
 vendor/github.com/docker/libtrust/LICENSE | 191 ++
 vendor/github.com/docker/libtrust/MAINTAINERS | 3 +
 vendor/github.com/docker/libtrust/README.md | 18 +
 .../docker/libtrust/certificates.go | 175 ++
 vendor/github.com/docker/libtrust/doc.go | 9 +
 vendor/github.com/docker/libtrust/ec_key.go | 428 ++++
 vendor/github.com/docker/libtrust/filter.go | 50 +
 vendor/github.com/docker/libtrust/hash.go | 56 +
 vendor/github.com/docker/libtrust/jsonsign.go | 657 +++++
 vendor/github.com/docker/libtrust/key.go | 253 ++
 .../github.com/docker/libtrust/key_files.go | 255 ++
 .../github.com/docker/libtrust/key_manager.go | 175 ++
 vendor/github.com/docker/libtrust/rsa_key.go | 427 ++++
 vendor/github.com/docker/libtrust/util.go | 363 +++
 vendor/github.com/godbus/dbus/CONTRIBUTING.md | 50 +
 vendor/github.com/godbus/dbus/LICENSE | 25 +
 vendor/github.com/godbus/dbus/MAINTAINERS | 2 +
 vendor/github.com/godbus/dbus/README.markdown | 41 +
 vendor/github.com/godbus/dbus/auth.go | 253 ++
 .../github.com/godbus/dbus/auth_external.go | 26 +
 vendor/github.com/godbus/dbus/auth_sha1.go | 102 +
 vendor/github.com/godbus/dbus/call.go | 36 +
 vendor/github.com/godbus/dbus/conn.go | 625 +++++
 vendor/github.com/godbus/dbus/conn_darwin.go | 21 +
 vendor/github.com/godbus/dbus/conn_other.go | 27 +
 vendor/github.com/godbus/dbus/dbus.go | 258 ++
 vendor/github.com/godbus/dbus/decoder.go | 228 ++
 vendor/github.com/godbus/dbus/doc.go | 63 +
 vendor/github.com/godbus/dbus/encoder.go | 208 ++
 vendor/github.com/godbus/dbus/export.go | 411 ++++
 vendor/github.com/godbus/dbus/homedir.go | 28 +
 .../github.com/godbus/dbus/homedir_dynamic.go | 15 +
 .../github.com/godbus/dbus/homedir_static.go | 45 +
 vendor/github.com/godbus/dbus/message.go | 346 +++
 vendor/github.com/godbus/dbus/object.go | 126 +
 vendor/github.com/godbus/dbus/sig.go | 257 ++
 .../godbus/dbus/transport_darwin.go | 6 +
 .../godbus/dbus/transport_generic.go | 35 +
 .../github.com/godbus/dbus/transport_unix.go | 196 ++
 .../dbus/transport_unixcred_dragonfly.go | 95 +
 .../godbus/dbus/transport_unixcred_linux.go | 25 +
 vendor/github.com/godbus/dbus/variant.go | 139 ++
 .../github.com/godbus/dbus/variant_lexer.go | 284 +++
 .../github.com/godbus/dbus/variant_parser.go | 817 +++++++
 vendor/github.com/golang/protobuf/.gitignore | 15 +
 vendor/github.com/golang/protobuf/AUTHORS | 3 +
 .../github.com/golang/protobuf/CONTRIBUTORS | 3 +
 vendor/github.com/golang/protobuf/LICENSE | 31 +
 .../github.com/golang/protobuf/Make.protobuf | 40 +
 vendor/github.com/golang/protobuf/Makefile | 54 +
 vendor/github.com/golang/protobuf/README.md | 199 ++
 .../github.com/golang/protobuf/proto/Makefile | 43 +
 .../github.com/golang/protobuf/proto/clone.go | 223 ++
 .../golang/protobuf/proto/decode.go | 868 +++++++
 .../golang/protobuf/proto/encode.go | 1331 ++++++++++
 .../github.com/golang/protobuf/proto/equal.go | 276 +++
 .../golang/protobuf/proto/extensions.go | 399 +++
 .../github.com/golang/protobuf/proto/lib.go | 894 +++++++
 .../golang/protobuf/proto/message_set.go | 280 +++
 .../golang/protobuf/proto/pointer_reflect.go | 479 ++++
 .../golang/protobuf/proto/pointer_unsafe.go | 266 ++
 .../golang/protobuf/proto/properties.go | 846 +++++++
 .../github.com/golang/protobuf/proto/text.go | 849 +++++++
 .../golang/protobuf/proto/text_parser.go | 871 +++++++
 vendor/github.com/imdario/mergo/.travis.yml | 2 +
 vendor/github.com/imdario/mergo/LICENSE | 28 +
 vendor/github.com/imdario/mergo/README.md | 122 +
 vendor/github.com/imdario/mergo/doc.go | 44 +
 vendor/github.com/imdario/mergo/map.go | 154 ++
 vendor/github.com/imdario/mergo/merge.go | 120 +
 vendor/github.com/imdario/mergo/mergo.go | 90 +
 .../mattn/go-shellwords/.travis.yml | 9 +
 .../github.com/mattn/go-shellwords/README.md | 47 +
 .../mattn/go-shellwords/shellwords.go | 134 +
 .../mattn/go-shellwords/util_posix.go | 19 +
 .../mattn/go-shellwords/util_windows.go | 17 +
 .../opencontainers/runc/checkpoint.go | 89 +
 .../github.com/opencontainers/runc/delete.go | 38 +
 .../github.com/opencontainers/runc/events.go | 100 +
 vendor/github.com/opencontainers/runc/exec.go | 187 ++
 vendor/github.com/opencontainers/runc/kill.go | 96 +
 .../runc/libcontainer/apparmor/apparmor.go | 38 +
 .../apparmor/apparmor_disabled.go | 20 +
 .../runc/libcontainer/capabilities_linux.go | 69 +
 .../runc/libcontainer/cgroups/cgroups.go | 64 +
 .../cgroups/cgroups_unsupported.go | 3 +
 .../runc/libcontainer/cgroups/fs/apply_raw.go | 400 +++
 .../runc/libcontainer/cgroups/fs/blkio.go | 237 ++
 .../runc/libcontainer/cgroups/fs/cpu.go | 94 +
 .../runc/libcontainer/cgroups/fs/cpuacct.go | 121 +
 .../runc/libcontainer/cgroups/fs/cpuset.go | 139 ++
 .../runc/libcontainer/cgroups/fs/devices.go | 78 +
 .../runc/libcontainer/cgroups/fs/freezer.go | 61 +
 .../libcontainer/cgroups/fs/fs_unsupported.go | 3 +
 .../runc/libcontainer/cgroups/fs/hugetlb.go | 71 +
 .../runc/libcontainer/cgroups/fs/memory.go | 249 ++
 .../runc/libcontainer/cgroups/fs/name.go | 40 +
 .../runc/libcontainer/cgroups/fs/net_cls.go | 41 +
 .../runc/libcontainer/cgroups/fs/net_prio.go | 41 +
 .../libcontainer/cgroups/fs/perf_event.go | 35 +
 .../runc/libcontainer/cgroups/fs/pids.go | 73 +
 .../runc/libcontainer/cgroups/fs/utils.go | 79 +
 .../runc/libcontainer/cgroups/stats.go | 105 +
 .../cgroups/systemd/apply_nosystemd.go | 55 +
 .../cgroups/systemd/apply_systemd.go | 479 ++++
 .../runc/libcontainer/cgroups/utils.go | 378 +++
 .../runc/libcontainer/compat_1.5_linux.go | 10 +
 .../runc/libcontainer/configs/blkio_device.go | 61 +
 .../runc/libcontainer/configs/cgroup_unix.go | 124 +
 .../configs/cgroup_unsupported.go | 6 +
 .../libcontainer/configs/cgroup_windows.go | 6 +
 .../runc/libcontainer/configs/config.go | 328 +++
 .../runc/libcontainer/configs/config_unix.go | 51 +
 .../runc/libcontainer/configs/device.go | 57 +
 .../libcontainer/configs/device_defaults.go | 125 +
 .../libcontainer/configs/hugepage_limit.go | 9 +
 .../configs/interface_priority_map.go | 14 +
 .../runc/libcontainer/configs/mount.go | 30 +
 .../runc/libcontainer/configs/namespaces.go | 5 +
 .../configs/namespaces_syscall.go | 31 +
 .../configs/namespaces_syscall_unsupported.go | 15 +
 .../libcontainer/configs/namespaces_unix.go | 127 +
 .../configs/namespaces_unsupported.go | 8 +
 .../runc/libcontainer/configs/network.go | 72 +
 .../configs/validate/validator.go | 133 +
 .../runc/libcontainer/console.go | 15 +
 .../runc/libcontainer/console_freebsd.go | 13 +
 .../runc/libcontainer/console_linux.go | 145 ++
 .../runc/libcontainer/console_windows.go | 30 +
 .../runc/libcontainer/container.go | 144 ++
 .../runc/libcontainer/container_linux.go | 1228 ++++++++++
 .../runc/libcontainer/container_windows.go | 20 +
 .../runc/libcontainer/criu_opts_unix.go | 37 +
 .../runc/libcontainer/criu_opts_windows.go | 6 +
 .../runc/libcontainer/criurpc/Makefile | 2 +
 .../runc/libcontainer/criurpc/criurpc.pb.go | 822 +++++++
 .../runc/libcontainer/criurpc/criurpc.proto | 174 ++
 .../runc/libcontainer/devices/devices_unix.go | 102 +
 .../devices/devices_unsupported.go | 3 +
 .../runc/libcontainer/devices/number.go | 24 +
 .../opencontainers/runc/libcontainer/error.go | 70 +
 .../runc/libcontainer/factory.go | 45 +
 .../runc/libcontainer/factory_linux.go | 290 +++
 .../runc/libcontainer/generic_error.go | 87 +
 .../runc/libcontainer/init_linux.go | 367 +++
 .../runc/libcontainer/keys/keyctl.go | 67 +
 .../runc/libcontainer/label/label.go | 80 +
 .../runc/libcontainer/label/label_selinux.go | 197 ++
 .../runc/libcontainer/message_linux.go | 88 +
 .../runc/libcontainer/network_linux.go | 259 ++
 .../runc/libcontainer/notify_linux.go | 89 +
 .../runc/libcontainer/nsenter/README.md | 25 +
 .../runc/libcontainer/nsenter/nsenter.go | 12 +
 .../libcontainer/nsenter/nsenter_gccgo.go | 25 +
 .../nsenter/nsenter_unsupported.go | 5 +
 .../runc/libcontainer/nsenter/nsexec.c | 472 ++++
 .../runc/libcontainer/process.go | 121 +
 .../runc/libcontainer/process_linux.go | 487 ++++
 .../runc/libcontainer/restored_process.go | 122 +
 .../runc/libcontainer/rootfs_linux.go | 725 ++++++
 .../runc/libcontainer/seccomp/config.go | 71 +
 .../libcontainer/seccomp/seccomp_linux.go | 229 ++
 .../seccomp/seccomp_unsupported.go | 24 +
 .../runc/libcontainer/selinux/selinux.go | 485 ++++
 .../runc/libcontainer/setgroups_linux.go | 11 +
 .../runc/libcontainer/setns_init_linux.go | 53 +
 .../runc/libcontainer/specconv/spec_linux.go | 740 ++++++
 .../runc/libcontainer/stacktrace/capture.go | 27 +
 .../runc/libcontainer/stacktrace/frame.go | 38 +
 .../libcontainer/stacktrace/stacktrace.go | 5 +
 .../runc/libcontainer/standard_init_linux.go | 147 ++
 .../runc/libcontainer/state_linux.go | 228 ++
 .../opencontainers/runc/libcontainer/stats.go | 15 +
 .../runc/libcontainer/stats_freebsd.go | 5 +
 .../runc/libcontainer/stats_linux.go | 8 +
 .../runc/libcontainer/stats_windows.go | 5 +
 .../runc/libcontainer/system/linux.go | 148 ++
 .../runc/libcontainer/system/proc.go | 27 +
 .../runc/libcontainer/system/setns_linux.go | 40 +
 .../libcontainer/system/syscall_linux_386.go | 25 +
 .../libcontainer/system/syscall_linux_64.go | 25 +
 .../libcontainer/system/syscall_linux_arm.go | 25 +
 .../runc/libcontainer/system/sysconfig.go | 12 +
 .../libcontainer/system/sysconfig_notcgo.go | 15 +
 .../runc/libcontainer/system/unsupported.go | 9 +
 .../runc/libcontainer/system/xattrs_linux.go | 99 +
 .../runc/libcontainer/utils/utils.go | 102 +
 .../runc/libcontainer/utils/utils_unix.go | 33 +
 vendor/github.com/opencontainers/runc/list.go | 124 +
 vendor/github.com/opencontainers/runc/main.go | 126 +
 .../opencontainers/runc/main_unix.go | 5 +
 .../opencontainers/runc/main_unsupported.go | 13 +
 .../github.com/opencontainers/runc/pause.go | 47 +
 .../github.com/opencontainers/runc/restore.go | 191 ++
 .../opencontainers/runc/rlimit_linux.go | 49 +
 .../github.com/opencontainers/runc/signals.go | 116 +
 vendor/github.com/opencontainers/runc/spec.go | 249 ++
 .../github.com/opencontainers/runc/start.go | 138 ++
 .../github.com/opencontainers/runc/state.go | 70 +
 vendor/github.com/opencontainers/runc/tty.go | 126 +
 .../github.com/opencontainers/runc/utils.go | 287 +++
 .../opencontainers/runtime-spec/.gitignore | 2 +
 .../opencontainers/runtime-spec/.travis.yml | 18 +
 .../opencontainers/runtime-spec/ChangeLog | 280 +++
 .../opencontainers/runtime-spec/LICENSE | 191 ++
 .../opencontainers/runtime-spec/MAINTAINERS | 9 +
 .../opencontainers/runtime-spec/Makefile | 71 +
 .../opencontainers/runtime-spec/README.md | 159 ++
 .../opencontainers/runtime-spec/ROADMAP.md | 103 +
 .../opencontainers/runtime-spec/bundle.md | 24 +
 .../runtime-spec/code-of-conduct.md | 37 +
 .../runtime-spec/config-linux.md | 579 +++++
 .../opencontainers/runtime-spec/config.md | 619 +++++
 .../opencontainers/runtime-spec/glossary.md | 36 +
 .../runtime-spec/implementations.md | 18 +
 .../opencontainers/runtime-spec/principles.md | 46 +
 .../opencontainers/runtime-spec/project.md | 10 +
 .../runtime-spec/runtime-linux.md | 8 +
 .../opencontainers/runtime-spec/runtime.md | 97 +
 .../runtime-spec/specs-go/config.go | 419 ++++
 .../runtime-spec/specs-go/state.go | 13 +
 .../runtime-spec/specs-go/version.go | 18 +
 .../opencontainers/runtime-spec/style.md | 97 +
 .../opencontainers/specs/.gitignore | 2 +
 .../opencontainers/specs/.travis.yml | 18 +
 .../github.com/opencontainers/specs/ChangeLog | 280 +++
 .../github.com/opencontainers/specs/LICENSE | 191 ++
 .../opencontainers/specs/MAINTAINERS | 9 +
 .../github.com/opencontainers/specs/Makefile | 71 +
 .../github.com/opencontainers/specs/README.md | 159 ++
 .../opencontainers/specs/ROADMAP.md | 103 +
 .../github.com/opencontainers/specs/bundle.md | 24 +
 .../opencontainers/specs/code-of-conduct.md | 37 +
 .../opencontainers/specs/config-linux.md | 579 +++++
 .../github.com/opencontainers/specs/config.md | 619 +++++
 .../opencontainers/specs/glossary.md | 36 +
 .../opencontainers/specs/implementations.md | 18 +
 .../opencontainers/specs/principles.md | 46 +
 .../opencontainers/specs/project.md | 10 +
 .../opencontainers/specs/runtime-linux.md | 8 +
 .../opencontainers/specs/runtime.md | 97 +
 .../opencontainers/specs/specs-go/config.go | 419 ++++
 .../opencontainers/specs/specs-go/state.go | 13 +
 .../opencontainers/specs/specs-go/version.go | 18 +
 .../github.com/opencontainers/specs/style.md | 97 +
 .../github.com/rcrowley/go-metrics/.gitignore | 9 +
 .../rcrowley/go-metrics/.travis.yml | 14 +
 vendor/github.com/rcrowley/go-metrics/LICENSE | 29 +
 .../github.com/rcrowley/go-metrics/README.md | 139 ++
 .../github.com/rcrowley/go-metrics/counter.go | 112 +
 .../github.com/rcrowley/go-metrics/debug.go | 76 +
 vendor/github.com/rcrowley/go-metrics/ewma.go | 118 +
 .../github.com/rcrowley/go-metrics/gauge.go | 84 +
 .../rcrowley/go-metrics/gauge_float64.go | 91 +
 .../rcrowley/go-metrics/graphite.go | 113 +
 .../rcrowley/go-metrics/healthcheck.go | 61 +
 .../rcrowley/go-metrics/histogram.go | 202 ++
 vendor/github.com/rcrowley/go-metrics/json.go | 83 +
 vendor/github.com/rcrowley/go-metrics/log.go | 77 +
 .../github.com/rcrowley/go-metrics/memory.md | 285 +++
 .../github.com/rcrowley/go-metrics/meter.go | 233 ++
 .../github.com/rcrowley/go-metrics/metrics.go | 13 +
 .../rcrowley/go-metrics/opentsdb.go | 119 +
 .../rcrowley/go-metrics/registry.go | 247 ++
 .../github.com/rcrowley/go-metrics/runtime.go | 212 ++
 .../rcrowley/go-metrics/runtime_cgo.go | 10 +
 .../go-metrics/runtime_gccpufraction.go | 9 +
 .../rcrowley/go-metrics/runtime_no_cgo.go | 7 +
 .../go-metrics/runtime_no_gccpufraction.go | 9 +
 .../github.com/rcrowley/go-metrics/sample.go | 609 +++++
 .../github.com/rcrowley/go-metrics/syslog.go | 78 +
 .../github.com/rcrowley/go-metrics/timer.go | 311 +++
 .../rcrowley/go-metrics/validate.sh | 10 +
 .../github.com/rcrowley/go-metrics/writer.go | 100 +
 .../seccomp/libseccomp-golang/LICENSE | 22 +
 .../seccomp/libseccomp-golang/README | 26 +
 .../seccomp/libseccomp-golang/seccomp.go | 827 +++++++
 .../libseccomp-golang/seccomp_internal.go | 461 ++++
 vendor/github.com/syndtr/gocapability/LICENSE | 24 +
 .../gocapability/capability/capability.go | 72 +
 .../capability/capability_linux.go | 608 +++++
 .../capability/capability_noop.go | 19 +
 .../syndtr/gocapability/capability/enum.go | 264 ++
 .../gocapability/capability/enum_gen.go | 129 +
 .../gocapability/capability/syscall_linux.go | 145 ++
 .../github.com/tchap/go-patricia/.gitignore | 25 +
 vendor/github.com/tchap/go-patricia/AUTHORS | 3 +
 vendor/github.com/tchap/go-patricia/LICENSE | 20 +
 vendor/github.com/tchap/go-patricia/README.md | 123 +
 .../tchap/go-patricia/patricia/children.go | 244 ++
 .../tchap/go-patricia/patricia/patricia.go | 467 ++++
 vendor/golang.org/x/net/http2/.gitignore | 2 +
 vendor/golang.org/x/net/http2/Dockerfile | 51 +
 vendor/golang.org/x/net/http2/Makefile | 3 +
 vendor/golang.org/x/net/http2/README | 20 +
 .../x/net/http2/client_conn_pool.go | 225 ++
 .../x/net/http2/configure_transport.go | 89 +
 vendor/golang.org/x/net/http2/errors.go | 122 +
 vendor/golang.org/x/net/http2/fixed_buffer.go | 60 +
 vendor/golang.org/x/net/http2/flow.go | 50 +
 vendor/golang.org/x/net/http2/frame.go | 1496 +++++++++++
 vendor/golang.org/x/net/http2/go15.go | 11 +
 vendor/golang.org/x/net/http2/gotrack.go | 170 ++
 vendor/golang.org/x/net/http2/headermap.go | 78 +
 vendor/golang.org/x/net/http2/hpack/encode.go | 251 ++
 vendor/golang.org/x/net/http2/hpack/hpack.go | 542 ++++
 .../golang.org/x/net/http2/hpack/huffman.go | 190 ++
 vendor/golang.org/x/net/http2/hpack/tables.go | 352 +++
 vendor/golang.org/x/net/http2/http2.go | 463 ++++
 vendor/golang.org/x/net/http2/not_go15.go | 11 +
 vendor/golang.org/x/net/http2/not_go16.go | 13 +
 vendor/golang.org/x/net/http2/pipe.go | 147 ++
 vendor/golang.org/x/net/http2/server.go | 2178 +++++++++++++++++
 vendor/golang.org/x/net/http2/transport.go | 1667 +++++++++++++
 vendor/golang.org/x/net/http2/write.go | 262 ++
 vendor/golang.org/x/net/http2/writesched.go | 283 +++
 .../x/net/internal/timeseries/timeseries.go | 525 ++++
 vendor/golang.org/x/net/trace/events.go | 524 ++++
 vendor/golang.org/x/net/trace/histogram.go | 356 +++
 vendor/golang.org/x/net/trace/trace.go | 1062 ++++++++
 vendor/golang.org/x/net/websocket/client.go | 113 +
 vendor/golang.org/x/net/websocket/hybi.go | 586 +++++
 vendor/golang.org/x/net/websocket/server.go | 113 +
 .../golang.org/x/net/websocket/websocket.go | 414 ++++
 vendor/google.golang.org/grpc/.travis.yml | 17 +
 vendor/google.golang.org/grpc/CONTRIBUTING.md | 46 +
 vendor/google.golang.org/grpc/LICENSE | 28 +
 vendor/google.golang.org/grpc/Makefile | 51 +
 vendor/google.golang.org/grpc/PATENTS | 22 +
 vendor/google.golang.org/grpc/README.md | 32 +
 vendor/google.golang.org/grpc/backoff.go | 80 +
 vendor/google.golang.org/grpc/call.go | 191 ++
 vendor/google.golang.org/grpc/clientconn.go | 623 +++++
 vendor/google.golang.org/grpc/codegen.sh | 17 +
 .../grpc/codes/code_string.go | 16 +
 vendor/google.golang.org/grpc/codes/codes.go | 159 ++
 vendor/google.golang.org/grpc/coverage.sh | 47 +
 .../grpc/credentials/credentials.go | 226 ++
 vendor/google.golang.org/grpc/doc.go | 6 +
 .../google.golang.org/grpc/grpclog/logger.go | 93 +
 vendor/google.golang.org/grpc/interceptor.go | 74 +
 .../grpc/internal/internal.go | 49 +
 .../grpc/metadata/metadata.go | 134 +
 .../google.golang.org/grpc/naming/naming.go | 73 +
 vendor/google.golang.org/grpc/peer/peer.go | 65 +
 vendor/google.golang.org/grpc/picker.go | 243 ++
 vendor/google.golang.org/grpc/rpc_util.go | 418 ++++
 vendor/google.golang.org/grpc/server.go | 782 ++++++
 vendor/google.golang.org/grpc/stream.go | 414 ++++
 vendor/google.golang.org/grpc/trace.go | 120 +
 .../grpc/transport/control.go | 210 ++
 .../grpc/transport/handler_server.go | 383 +++
 .../grpc/transport/http2_client.go | 914 +++++++
 .../grpc/transport/http2_server.go | 735 ++++++
 .../grpc/transport/http_util.go | 411 ++++
 .../grpc/transport/transport.go | 508 ++++
 vendor/gopkg.in/fsnotify.v1/.gitignore | 6 +
 vendor/gopkg.in/fsnotify.v1/.travis.yml | 15 +
 vendor/gopkg.in/fsnotify.v1/AUTHORS | 34 +
 vendor/gopkg.in/fsnotify.v1/CHANGELOG.md | 263 ++
 vendor/gopkg.in/fsnotify.v1/CONTRIBUTING.md | 77 +
 vendor/gopkg.in/fsnotify.v1/LICENSE | 28 +
 .../gopkg.in/fsnotify.v1/NotUsed.xcworkspace | 0
 vendor/gopkg.in/fsnotify.v1/README.md | 59 +
 vendor/gopkg.in/fsnotify.v1/circle.yml | 26 +
 vendor/gopkg.in/fsnotify.v1/fsnotify.go | 62 +
 vendor/gopkg.in/fsnotify.v1/inotify.go | 306 +++
 vendor/gopkg.in/fsnotify.v1/inotify_poller.go | 186 ++
 vendor/gopkg.in/fsnotify.v1/kqueue.go | 463 ++++
 vendor/gopkg.in/fsnotify.v1/open_mode_bsd.go | 11 +
 .../gopkg.in/fsnotify.v1/open_mode_darwin.go | 12 +
 vendor/gopkg.in/fsnotify.v1/windows.go | 561 +++++
 832 files changed, 125590 insertions(+), 26 deletions(-)
 create mode 100644 vendor/github.com/RackSec/srslog/.gitignore
 create mode 100644 vendor/github.com/RackSec/srslog/.travis.yml
 create mode 100644 vendor/github.com/RackSec/srslog/CODE_OF_CONDUCT.md
 create mode 100644 vendor/github.com/RackSec/srslog/LICENSE
 create mode 100644 vendor/github.com/RackSec/srslog/README.md
 create mode 100644 vendor/github.com/RackSec/srslog/constants.go
 create mode 100644 vendor/github.com/RackSec/srslog/dialer.go
 create mode 100644 vendor/github.com/RackSec/srslog/formatter.go
 create mode 100644 vendor/github.com/RackSec/srslog/framer.go
 create mode 100644 vendor/github.com/RackSec/srslog/net_conn.go
 create mode 100644 vendor/github.com/RackSec/srslog/srslog.go
 create mode 100644 vendor/github.com/RackSec/srslog/srslog_unix.go
 create mode 100644 vendor/github.com/RackSec/srslog/writer.go
 create mode 100644 vendor/github.com/boltdb/bolt/.gitignore
 create mode 100644 vendor/github.com/boltdb/bolt/LICENSE
 create mode 100644 vendor/github.com/boltdb/bolt/Makefile
 create mode 100644 vendor/github.com/boltdb/bolt/README.md
 create mode 100644 vendor/github.com/boltdb/bolt/appveyor.yml
 create mode 100644 vendor/github.com/boltdb/bolt/bolt_386.go
 create mode 100644 vendor/github.com/boltdb/bolt/bolt_amd64.go
 create mode 100644 vendor/github.com/boltdb/bolt/bolt_arm.go
 create mode 100644 vendor/github.com/boltdb/bolt/bolt_arm64.go
 create mode 100644 vendor/github.com/boltdb/bolt/bolt_linux.go
 create mode 100644 vendor/github.com/boltdb/bolt/bolt_openbsd.go
 create mode 100644 vendor/github.com/boltdb/bolt/bolt_ppc.go
 create mode 100644 vendor/github.com/boltdb/bolt/bolt_ppc64.go
 create mode 100644 vendor/github.com/boltdb/bolt/bolt_ppc64le.go
 create mode 100644 vendor/github.com/boltdb/bolt/bolt_s390x.go
 create mode 100644 vendor/github.com/boltdb/bolt/bolt_unix.go
 create mode 100644 vendor/github.com/boltdb/bolt/bolt_unix_solaris.go
 create mode 100644 vendor/github.com/boltdb/bolt/bolt_windows.go
 create mode 100644 vendor/github.com/boltdb/bolt/boltsync_unix.go
 create mode 100644 vendor/github.com/boltdb/bolt/bucket.go
 create mode 100644 vendor/github.com/boltdb/bolt/cursor.go
 create mode 100644 vendor/github.com/boltdb/bolt/db.go
 create mode 100644 vendor/github.com/boltdb/bolt/doc.go
 create mode 100644 vendor/github.com/boltdb/bolt/errors.go
 create mode 100644 vendor/github.com/boltdb/bolt/freelist.go
 create mode 100644 vendor/github.com/boltdb/bolt/node.go
 create mode 100644 vendor/github.com/boltdb/bolt/page.go
 create mode 100644 vendor/github.com/boltdb/bolt/tx.go
 create mode 100644 vendor/github.com/cloudfoundry/gosigar/.gitignore
 create mode 100644 vendor/github.com/cloudfoundry/gosigar/.travis.yml
 create mode 100644 vendor/github.com/cloudfoundry/gosigar/LICENSE
 create mode 100644 vendor/github.com/cloudfoundry/gosigar/NOTICE
 create mode 100644 vendor/github.com/cloudfoundry/gosigar/README.md
 create mode 100644 vendor/github.com/cloudfoundry/gosigar/Vagrantfile
 create mode 100644 vendor/github.com/cloudfoundry/gosigar/concrete_sigar.go
 create mode 100644 vendor/github.com/cloudfoundry/gosigar/sigar_darwin.go
 create mode 100644 vendor/github.com/cloudfoundry/gosigar/sigar_format.go
 create mode 100644 vendor/github.com/cloudfoundry/gosigar/sigar_interface.go
 create mode 100644 vendor/github.com/cloudfoundry/gosigar/sigar_linux.go
 create mode 100644 vendor/github.com/cloudfoundry/gosigar/sigar_unix.go
 create mode 100644 vendor/github.com/cloudfoundry/gosigar/sigar_util.go
 create mode 100644 vendor/github.com/cloudfoundry/gosigar/sigar_windows.go
 create mode 100644 vendor/github.com/coreos/go-systemd/.travis.yml
 create mode 100644 vendor/github.com/coreos/go-systemd/LICENSE
 create mode 100644 vendor/github.com/coreos/go-systemd/README.md
 create mode 100644 vendor/github.com/coreos/go-systemd/activation/files.go
 create mode 100644 vendor/github.com/coreos/go-systemd/activation/listeners.go
 create mode 100644 vendor/github.com/coreos/go-systemd/activation/packetconns.go
 create mode 100644 vendor/github.com/coreos/go-systemd/daemon/sdnotify.go
 create mode 100644 vendor/github.com/coreos/go-systemd/dbus/dbus.go
 create mode 100644 vendor/github.com/coreos/go-systemd/dbus/methods.go
 create mode 100644 vendor/github.com/coreos/go-systemd/dbus/properties.go
 create mode 100644 vendor/github.com/coreos/go-systemd/dbus/set.go
 create mode 100644 vendor/github.com/coreos/go-systemd/dbus/subscription.go
 create mode 100644 vendor/github.com/coreos/go-systemd/dbus/subscription_set.go
 create mode 100755 vendor/github.com/coreos/go-systemd/test
 create mode 100644 vendor/github.com/coreos/go-systemd/util/util.go
 create mode 100644 vendor/github.com/docker/containerd/api/grpc/server/server.go
 create mode 100644 vendor/github.com/docker/containerd/api/grpc/server/server_linux.go
 create mode 100644 vendor/github.com/docker/containerd/api/grpc/server/server_solaris.go
 create mode 100644 vendor/github.com/docker/containerd/api/grpc/types/api.pb.go
 create mode 100644 vendor/github.com/docker/containerd/api/grpc/types/api.proto
 create mode 100644 vendor/github.com/docker/containerd/archutils/epoll.go
 create mode 100644 vendor/github.com/docker/containerd/archutils/epoll_arm64.go
 create mode 100644 vendor/github.com/docker/containerd/runtime/container.go
 create mode 100644 vendor/github.com/docker/containerd/runtime/container_linux.go
 create mode 100644 vendor/github.com/docker/containerd/runtime/container_solaris.go
 create mode 100644 vendor/github.com/docker/containerd/runtime/direct_process.go
 create mode 100644 vendor/github.com/docker/containerd/runtime/process.go
 create mode 100644 vendor/github.com/docker/containerd/runtime/runtime.go
 create mode 100644 vendor/github.com/docker/containerd/runtime/stats.go
 create mode 100644 vendor/github.com/docker/containerd/specs/spec_linux.go
 create mode 100644 vendor/github.com/docker/containerd/specs/spec_solaris.go
 create mode 100644 vendor/github.com/docker/containerd/supervisor/add_process.go
 create mode 100644 vendor/github.com/docker/containerd/supervisor/checkpoint.go
 create mode 100644 vendor/github.com/docker/containerd/supervisor/create.go
 create mode 100644 vendor/github.com/docker/containerd/supervisor/create_solaris.go
 create mode 100644 vendor/github.com/docker/containerd/supervisor/delete.go
 create mode 100644 vendor/github.com/docker/containerd/supervisor/errors.go
 create mode 100644 vendor/github.com/docker/containerd/supervisor/exit.go
 create mode 100644 vendor/github.com/docker/containerd/supervisor/get_containers.go
 create mode 100644 vendor/github.com/docker/containerd/supervisor/machine.go
 create mode 100644 vendor/github.com/docker/containerd/supervisor/machine_solaris.go
 create mode 100644 vendor/github.com/docker/containerd/supervisor/metrics.go
 create mode 100644 vendor/github.com/docker/containerd/supervisor/monitor_linux.go
 create mode 100644 vendor/github.com/docker/containerd/supervisor/monitor_solaris.go
 create mode 100644 vendor/github.com/docker/containerd/supervisor/oom.go
 create mode 100644 vendor/github.com/docker/containerd/supervisor/signal.go
 create mode 100644 vendor/github.com/docker/containerd/supervisor/sort.go
 create mode 100644 vendor/github.com/docker/containerd/supervisor/stats.go
 create mode 100644 vendor/github.com/docker/containerd/supervisor/supervisor.go
 create mode 100644 vendor/github.com/docker/containerd/supervisor/task.go
 create mode 100644 vendor/github.com/docker/containerd/supervisor/types.go
 create mode 100644 vendor/github.com/docker/containerd/supervisor/update.go
 create mode 100644 vendor/github.com/docker/containerd/supervisor/worker.go
 create mode 100644 vendor/github.com/docker/containerd/version.go
 create mode 100644 vendor/github.com/docker/distribution/manifest/doc.go
 create mode 100644 vendor/github.com/docker/distribution/manifest/manifestlist/manifestlist.go
 create mode 100644 vendor/github.com/docker/distribution/manifest/schema1/config_builder.go
 create mode 100644 vendor/github.com/docker/distribution/manifest/schema1/manifest.go
 create mode 100644 vendor/github.com/docker/distribution/manifest/schema1/reference_builder.go
 create mode 100644 vendor/github.com/docker/distribution/manifest/schema1/sign.go
 create mode 100644 vendor/github.com/docker/distribution/manifest/schema1/verify.go
 create mode 100644 vendor/github.com/docker/distribution/manifest/schema2/builder.go
 create mode 100644 vendor/github.com/docker/distribution/manifest/schema2/manifest.go
 create mode 100644 vendor/github.com/docker/distribution/manifest/versioned.go
 create mode 100644 vendor/github.com/docker/docker/api/README.md
 create mode 100644 vendor/github.com/docker/docker/api/client/attach.go
 create mode 100644 vendor/github.com/docker/docker/api/client/build.go
 create mode 100644 vendor/github.com/docker/docker/api/client/cli.go
 create mode 100644 vendor/github.com/docker/docker/api/client/client.go
 create mode 100644 vendor/github.com/docker/docker/api/client/commit.go
 create mode 100644 vendor/github.com/docker/docker/api/client/cp.go
 create mode 100644 vendor/github.com/docker/docker/api/client/create.go
 create mode 100644 vendor/github.com/docker/docker/api/client/diff.go
 create mode 100644 vendor/github.com/docker/docker/api/client/events.go
 create mode 100644 vendor/github.com/docker/docker/api/client/exec.go
 create mode 100644 vendor/github.com/docker/docker/api/client/export.go
 create mode 100644 vendor/github.com/docker/docker/api/client/formatter/custom.go
 create mode 100644 vendor/github.com/docker/docker/api/client/formatter/formatter.go
 create mode 100644 vendor/github.com/docker/docker/api/client/hijack.go
 create mode 100644 vendor/github.com/docker/docker/api/client/history.go
 create mode 100644 vendor/github.com/docker/docker/api/client/images.go
 create mode 100644 vendor/github.com/docker/docker/api/client/import.go
 create mode 100644 vendor/github.com/docker/docker/api/client/info.go
 create mode 100644 vendor/github.com/docker/docker/api/client/inspect.go
 create mode 100644 vendor/github.com/docker/docker/api/client/inspect/inspector.go
 create mode 100644 vendor/github.com/docker/docker/api/client/inspect/inspector_go14.go
 create mode 100644 vendor/github.com/docker/docker/api/client/inspect/inspector_go15.go
 create mode 100644 vendor/github.com/docker/docker/api/client/kill.go
 create mode 100644 vendor/github.com/docker/docker/api/client/load.go
 create mode 100644 vendor/github.com/docker/docker/api/client/login.go
 create mode 100644 vendor/github.com/docker/docker/api/client/logout.go
 create mode 100644 vendor/github.com/docker/docker/api/client/logs.go
 create mode 100644 vendor/github.com/docker/docker/api/client/network.go
 create mode 100644 vendor/github.com/docker/docker/api/client/pause.go
 create mode 100644 vendor/github.com/docker/docker/api/client/port.go
 create mode 100644 vendor/github.com/docker/docker/api/client/ps.go
 create mode 100644 vendor/github.com/docker/docker/api/client/pull.go
 create mode 100644 vendor/github.com/docker/docker/api/client/push.go
 create mode 100644 vendor/github.com/docker/docker/api/client/rename.go
 create mode 100644 vendor/github.com/docker/docker/api/client/restart.go
 create mode 100644 vendor/github.com/docker/docker/api/client/rm.go
 create mode 100644 vendor/github.com/docker/docker/api/client/rmi.go
 create mode 100644 vendor/github.com/docker/docker/api/client/run.go
 create mode 100644 vendor/github.com/docker/docker/api/client/save.go
 create mode 100644 vendor/github.com/docker/docker/api/client/search.go
 create mode 100644 vendor/github.com/docker/docker/api/client/start.go
 create mode 100644 vendor/github.com/docker/docker/api/client/stats.go
 create mode 100644 vendor/github.com/docker/docker/api/client/stats_helpers.go
 create mode 100644 vendor/github.com/docker/docker/api/client/stop.go
 create mode 100644 vendor/github.com/docker/docker/api/client/tag.go
 create mode 100644 vendor/github.com/docker/docker/api/client/top.go
 create mode 100644 vendor/github.com/docker/docker/api/client/unpause.go
 create mode 100644 vendor/github.com/docker/docker/api/client/update.go
 create mode 100644 vendor/github.com/docker/docker/api/client/utils.go
 create mode 100644 vendor/github.com/docker/docker/api/client/version.go
 create mode 100644 vendor/github.com/docker/docker/api/client/volume.go
 create mode 100644 vendor/github.com/docker/docker/api/client/wait.go
 create mode 100644 vendor/github.com/docker/docker/api/common.go
 create mode 100644 vendor/github.com/docker/docker/api/server/httputils/errors.go
 create mode 100644 vendor/github.com/docker/docker/api/server/httputils/form.go
 create mode 100644 vendor/github.com/docker/docker/api/server/httputils/httputils.go
 create mode 100644 vendor/github.com/docker/docker/api/server/middleware.go
 create mode 100644 vendor/github.com/docker/docker/api/server/middleware/authorization.go
 create mode 100644 vendor/github.com/docker/docker/api/server/middleware/cors.go
 create mode 100644 vendor/github.com/docker/docker/api/server/middleware/debug.go
 create mode 100644 vendor/github.com/docker/docker/api/server/middleware/middleware.go
 create mode 100644 vendor/github.com/docker/docker/api/server/middleware/user_agent.go
 create mode 100644 vendor/github.com/docker/docker/api/server/middleware/version.go
 create mode 100644 vendor/github.com/docker/docker/api/server/profiler.go
 create mode 100644 vendor/github.com/docker/docker/api/server/router/build/backend.go
 create mode 100644 vendor/github.com/docker/docker/api/server/router/build/build.go
 create mode 100644 vendor/github.com/docker/docker/api/server/router/build/build_routes.go
 create mode 100644 vendor/github.com/docker/docker/api/server/router/container/backend.go
 create mode 100644 vendor/github.com/docker/docker/api/server/router/container/container.go
 create mode 100644 vendor/github.com/docker/docker/api/server/router/container/container_routes.go
 create mode 100644 vendor/github.com/docker/docker/api/server/router/container/copy.go
 create mode 100644 vendor/github.com/docker/docker/api/server/router/container/exec.go
 create mode 100644 vendor/github.com/docker/docker/api/server/router/container/inspect.go
 create mode 100644 vendor/github.com/docker/docker/api/server/router/image/backend.go
 create mode 100644 vendor/github.com/docker/docker/api/server/router/image/image.go
 create mode 100644 vendor/github.com/docker/docker/api/server/router/image/image_routes.go
 create mode 100644 vendor/github.com/docker/docker/api/server/router/local.go
 create mode 100644 vendor/github.com/docker/docker/api/server/router/router.go
 create mode 100644 vendor/github.com/docker/docker/api/server/router/system/backend.go
 create mode 100644 vendor/github.com/docker/docker/api/server/router/system/system.go
 create mode 100644 vendor/github.com/docker/docker/api/server/router/system/system_routes.go
 create mode 100644 vendor/github.com/docker/docker/api/server/router/volume/backend.go
 create mode 100644 vendor/github.com/docker/docker/api/server/router/volume/volume.go
 create mode 100644 vendor/github.com/docker/docker/api/server/router/volume/volume_routes.go
 create mode 100644 vendor/github.com/docker/docker/api/server/router_swapper.go
 create mode 100644 vendor/github.com/docker/docker/api/server/server.go
 create mode 100644 vendor/github.com/docker/docker/api/types/backend/backend.go
 create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/bflag.go
 create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/builder.go
 create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/command/command.go
 create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/dispatchers.go
 create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/envVarTest
 create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/evaluator.go
 create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/internals.go
 create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/line_parsers.go
 create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/parser.go
 create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/utils.go
 create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/shell_parser.go
 create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/support.go
 create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/wordsTest
 create mode 100644 vendor/github.com/docker/docker/cli/cli.go
 create mode 100644 vendor/github.com/docker/docker/cli/client.go
 create mode 100644 vendor/github.com/docker/docker/cli/common.go
 create mode 100644 vendor/github.com/docker/docker/cliconfig/credentials/credentials.go
 create mode 100644 vendor/github.com/docker/docker/cliconfig/credentials/default_store.go
 create mode 100644 vendor/github.com/docker/docker/cliconfig/credentials/default_store_darwin.go
 create mode 100644 vendor/github.com/docker/docker/cliconfig/credentials/default_store_linux.go
 create mode 100644 vendor/github.com/docker/docker/cliconfig/credentials/default_store_unsupported.go
 create mode 100644 vendor/github.com/docker/docker/cliconfig/credentials/file_store.go
 create mode 100644 vendor/github.com/docker/docker/cliconfig/credentials/native_store.go
 create mode 100644 vendor/github.com/docker/docker/cliconfig/credentials/shell_command.go
 create mode 100644 vendor/github.com/docker/docker/container/archive.go
 create mode 100644 vendor/github.com/docker/docker/container/container.go
 create mode 100644 vendor/github.com/docker/docker/container/container_unix.go
 create mode 100644 vendor/github.com/docker/docker/container/history.go
 create mode 100644 vendor/github.com/docker/docker/container/memory_store.go
 create mode 100644 vendor/github.com/docker/docker/container/monitor.go
 create mode 100644 vendor/github.com/docker/docker/container/mounts_unix.go
 create mode 100644 vendor/github.com/docker/docker/container/state.go
 create mode 100644 vendor/github.com/docker/docker/container/state_unix.go
 create mode 100644 vendor/github.com/docker/docker/container/store.go
 create mode 100644 vendor/github.com/docker/docker/daemon/apparmor_default.go
 create mode 100644 vendor/github.com/docker/docker/daemon/apparmor_default_unsupported.go
 create mode 100644 vendor/github.com/docker/docker/daemon/archive.go
 create mode 100644 vendor/github.com/docker/docker/daemon/archive_unix.go
 create mode 100644 vendor/github.com/docker/docker/daemon/attach.go
 create mode 100644 vendor/github.com/docker/docker/daemon/caps/utils_unix.go
 create mode 100644 vendor/github.com/docker/docker/daemon/changes.go
vendor/github.com/docker/docker/daemon/commit.go create mode 100644 vendor/github.com/docker/docker/daemon/config.go create mode 100644 vendor/github.com/docker/docker/daemon/config_experimental.go create mode 100644 vendor/github.com/docker/docker/daemon/config_stub.go create mode 100644 vendor/github.com/docker/docker/daemon/config_unix.go create mode 100644 vendor/github.com/docker/docker/daemon/container_operations_unix.go create mode 100644 vendor/github.com/docker/docker/daemon/create.go create mode 100644 vendor/github.com/docker/docker/daemon/create_unix.go create mode 100644 vendor/github.com/docker/docker/daemon/daemon.go create mode 100644 vendor/github.com/docker/docker/daemon/daemon_experimental.go create mode 100644 vendor/github.com/docker/docker/daemon/daemon_linux.go create mode 100644 vendor/github.com/docker/docker/daemon/daemon_stub.go create mode 100644 vendor/github.com/docker/docker/daemon/daemon_unix.go create mode 100644 vendor/github.com/docker/docker/daemon/daemon_unsupported.go create mode 100644 vendor/github.com/docker/docker/daemon/debugtrap_unix.go create mode 100644 vendor/github.com/docker/docker/daemon/debugtrap_unsupported.go create mode 100644 vendor/github.com/docker/docker/daemon/delete.go create mode 100644 vendor/github.com/docker/docker/daemon/errors.go create mode 100644 vendor/github.com/docker/docker/daemon/events.go create mode 100644 vendor/github.com/docker/docker/daemon/events/events.go create mode 100644 vendor/github.com/docker/docker/daemon/events/filter.go create mode 100644 vendor/github.com/docker/docker/daemon/exec.go create mode 100644 vendor/github.com/docker/docker/daemon/exec/exec.go create mode 100644 vendor/github.com/docker/docker/daemon/exec_linux.go create mode 100644 vendor/github.com/docker/docker/daemon/export.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/counter.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/overlay/copy.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/overlay/overlay.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/overlay/overlay_unsupported.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/register/register_overlay.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/register/register_vfs.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/vfs/driver.go create mode 100644 vendor/github.com/docker/docker/daemon/image_delete.go create mode 100644 vendor/github.com/docker/docker/daemon/images.go create mode 100644 vendor/github.com/docker/docker/daemon/import.go create mode 100644 vendor/github.com/docker/docker/daemon/info.go create mode 100644 vendor/github.com/docker/docker/daemon/inspect.go create mode 100644 vendor/github.com/docker/docker/daemon/inspect_unix.go create mode 100644 vendor/github.com/docker/docker/daemon/kill.go create mode 100644 vendor/github.com/docker/docker/daemon/links.go create mode 100644 vendor/github.com/docker/docker/daemon/links/links.go create mode 100644 vendor/github.com/docker/docker/daemon/list.go create mode 100644 vendor/github.com/docker/docker/daemon/list_unix.go create mode 100644 vendor/github.com/docker/docker/daemon/logdrivers_linux.go create mode 100644 vendor/github.com/docker/docker/daemon/logger/context.go create mode 100644 vendor/github.com/docker/docker/daemon/logger/copier.go create mode 100644 vendor/github.com/docker/docker/daemon/logger/factory.go create mode 100644 
vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonfilelog.go create mode 100644 vendor/github.com/docker/docker/daemon/logger/jsonfilelog/read.go create mode 100644 vendor/github.com/docker/docker/daemon/logger/logger.go create mode 100644 vendor/github.com/docker/docker/daemon/logger/loggerutils/log_tag.go create mode 100644 vendor/github.com/docker/docker/daemon/logger/loggerutils/rotatefilewriter.go create mode 100644 vendor/github.com/docker/docker/daemon/logger/syslog/syslog.go create mode 100644 vendor/github.com/docker/docker/daemon/logger/syslog/syslog_unsupported.go create mode 100644 vendor/github.com/docker/docker/daemon/logs.go create mode 100644 vendor/github.com/docker/docker/daemon/monitor.go create mode 100644 vendor/github.com/docker/docker/daemon/monitor_linux.go create mode 100644 vendor/github.com/docker/docker/daemon/mounts.go create mode 100644 vendor/github.com/docker/docker/daemon/network/settings.go create mode 100644 vendor/github.com/docker/docker/daemon/network_operations.go create mode 100644 vendor/github.com/docker/docker/daemon/oci_linux.go create mode 100644 vendor/github.com/docker/docker/daemon/pause.go create mode 100644 vendor/github.com/docker/docker/daemon/rename.go create mode 100644 vendor/github.com/docker/docker/daemon/resize.go create mode 100644 vendor/github.com/docker/docker/daemon/restart.go create mode 100644 vendor/github.com/docker/docker/daemon/seccomp_disabled.go create mode 100644 vendor/github.com/docker/docker/daemon/seccomp_linux.go create mode 100644 vendor/github.com/docker/docker/daemon/selinux_linux.go create mode 100644 vendor/github.com/docker/docker/daemon/selinux_unsupported.go create mode 100644 vendor/github.com/docker/docker/daemon/start.go create mode 100644 vendor/github.com/docker/docker/daemon/stats.go create mode 100644 vendor/github.com/docker/docker/daemon/stats_collector_unix.go create mode 100644 vendor/github.com/docker/docker/daemon/stop.go create mode 100644 vendor/github.com/docker/docker/daemon/top_unix.go create mode 100644 vendor/github.com/docker/docker/daemon/unpause.go create mode 100644 vendor/github.com/docker/docker/daemon/update.go create mode 100644 vendor/github.com/docker/docker/daemon/update_linux.go create mode 100644 vendor/github.com/docker/docker/daemon/volumes.go create mode 100644 vendor/github.com/docker/docker/daemon/volumes_unix.go create mode 100644 vendor/github.com/docker/docker/daemon/wait.go create mode 100644 vendor/github.com/docker/docker/distribution/errors.go create mode 100644 vendor/github.com/docker/docker/distribution/metadata/metadata.go create mode 100644 vendor/github.com/docker/docker/distribution/metadata/v1_id_service.go create mode 100644 vendor/github.com/docker/docker/distribution/metadata/v2_metadata_service.go create mode 100644 vendor/github.com/docker/docker/distribution/pull.go create mode 100644 vendor/github.com/docker/docker/distribution/pull_v1.go create mode 100644 vendor/github.com/docker/docker/distribution/pull_v2.go create mode 100644 vendor/github.com/docker/docker/distribution/pull_v2_unix.go create mode 100644 vendor/github.com/docker/docker/distribution/push.go create mode 100644 vendor/github.com/docker/docker/distribution/push_v1.go create mode 100644 vendor/github.com/docker/docker/distribution/push_v2.go create mode 100644 vendor/github.com/docker/docker/distribution/registry.go create mode 100644 vendor/github.com/docker/docker/distribution/xfer/download.go create mode 100644 
vendor/github.com/docker/docker/distribution/xfer/transfer.go create mode 100644 vendor/github.com/docker/docker/distribution/xfer/upload.go create mode 100644 vendor/github.com/docker/docker/docker/README.md create mode 100644 vendor/github.com/docker/docker/docker/client.go create mode 100644 vendor/github.com/docker/docker/docker/common.go create mode 100644 vendor/github.com/docker/docker/docker/daemon.go create mode 100644 vendor/github.com/docker/docker/docker/daemon_freebsd.go create mode 100644 vendor/github.com/docker/docker/docker/daemon_linux.go create mode 100644 vendor/github.com/docker/docker/docker/daemon_none.go create mode 100644 vendor/github.com/docker/docker/docker/daemon_unix.go create mode 100644 vendor/github.com/docker/docker/docker/docker.go create mode 100644 vendor/github.com/docker/docker/docker/flags.go create mode 100644 vendor/github.com/docker/docker/docker/listeners/listeners.go create mode 100644 vendor/github.com/docker/docker/docker/listeners/listeners_unix.go create mode 100644 vendor/github.com/docker/docker/docker/runc.go create mode 100644 vendor/github.com/docker/docker/dockerversion/useragent.go create mode 100644 vendor/github.com/docker/docker/dockerversion/version_lib.go create mode 100644 vendor/github.com/docker/docker/errors/errors.go create mode 100644 vendor/github.com/docker/docker/image/tarexport/load.go create mode 100644 vendor/github.com/docker/docker/image/tarexport/save.go create mode 100644 vendor/github.com/docker/docker/image/tarexport/tarexport.go create mode 100644 vendor/github.com/docker/docker/libcontainerd/client.go create mode 100644 vendor/github.com/docker/docker/libcontainerd/client_linux.go create mode 100644 vendor/github.com/docker/docker/libcontainerd/client_liverestore_linux.go create mode 100644 vendor/github.com/docker/docker/libcontainerd/client_shutdownrestore_linux.go create mode 100644 vendor/github.com/docker/docker/libcontainerd/container.go create mode 100644 vendor/github.com/docker/docker/libcontainerd/container_linux.go create mode 100644 vendor/github.com/docker/docker/libcontainerd/pausemonitor_linux.go create mode 100644 vendor/github.com/docker/docker/libcontainerd/process.go create mode 100644 vendor/github.com/docker/docker/libcontainerd/process_linux.go create mode 100644 vendor/github.com/docker/docker/libcontainerd/queue_linux.go create mode 100644 vendor/github.com/docker/docker/libcontainerd/remote.go create mode 100644 vendor/github.com/docker/docker/libcontainerd/remote_linux.go create mode 100644 vendor/github.com/docker/docker/libcontainerd/rpc_bridge.go create mode 100644 vendor/github.com/docker/docker/libcontainerd/rpc_bridge_wrapper.go create mode 100644 vendor/github.com/docker/docker/libcontainerd/types.go create mode 100644 vendor/github.com/docker/docker/libcontainerd/types_linux.go create mode 100644 vendor/github.com/docker/docker/libcontainerd/utils_linux.go create mode 100644 vendor/github.com/docker/docker/oci/defaults_linux.go create mode 100644 vendor/github.com/docker/docker/pkg/aaparser/aaparser.go create mode 100644 vendor/github.com/docker/docker/pkg/authorization/api.go create mode 100644 vendor/github.com/docker/docker/pkg/authorization/authz.go create mode 100644 vendor/github.com/docker/docker/pkg/authorization/plugin.go create mode 100644 vendor/github.com/docker/docker/pkg/authorization/response.go create mode 100644 vendor/github.com/docker/docker/pkg/broadcaster/unbuffered.go create mode 100644 vendor/github.com/docker/docker/pkg/filenotify/filenotify.go create 
mode 100644 vendor/github.com/docker/docker/pkg/filenotify/fsnotify.go create mode 100644 vendor/github.com/docker/docker/pkg/filenotify/poller.go create mode 100644 vendor/github.com/docker/docker/pkg/locker/README.md create mode 100644 vendor/github.com/docker/docker/pkg/locker/locker.go create mode 100644 vendor/github.com/docker/docker/pkg/namesgenerator/names-generator.go create mode 100644 vendor/github.com/docker/docker/pkg/parsers/kernel/kernel.go create mode 100644 vendor/github.com/docker/docker/pkg/parsers/kernel/uname_linux.go create mode 100644 vendor/github.com/docker/docker/pkg/parsers/kernel/uname_unsupported.go create mode 100644 vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_freebsd.go create mode 100644 vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_linux.go create mode 100644 vendor/github.com/docker/docker/pkg/parsers/parsers.go create mode 100644 vendor/github.com/docker/docker/pkg/pidfile/pidfile.go create mode 100644 vendor/github.com/docker/docker/pkg/platform/architecture_freebsd.go create mode 100644 vendor/github.com/docker/docker/pkg/platform/architecture_linux.go create mode 100644 vendor/github.com/docker/docker/pkg/platform/platform.go create mode 100644 vendor/github.com/docker/docker/pkg/platform/utsname_int8.go create mode 100644 vendor/github.com/docker/docker/pkg/platform/utsname_uint8.go create mode 100644 vendor/github.com/docker/docker/pkg/pubsub/publisher.go create mode 100644 vendor/github.com/docker/docker/pkg/registrar/registrar.go create mode 100644 vendor/github.com/docker/docker/pkg/stringutils/README.md create mode 100644 vendor/github.com/docker/docker/pkg/stringutils/stringutils.go create mode 100644 vendor/github.com/docker/docker/pkg/sysinfo/README.md create mode 100644 vendor/github.com/docker/docker/pkg/sysinfo/sysinfo.go create mode 100644 vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_freebsd.go create mode 100644 vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_linux.go create mode 100644 vendor/github.com/docker/docker/pkg/tailfile/tailfile.go create mode 100644 vendor/github.com/docker/docker/pkg/truncindex/truncindex.go create mode 100644 vendor/github.com/docker/docker/pkg/useragent/README.md create mode 100644 vendor/github.com/docker/docker/pkg/useragent/useragent.go create mode 100644 vendor/github.com/docker/docker/profiles/apparmor/apparmor.go create mode 100644 vendor/github.com/docker/docker/profiles/apparmor/template.go create mode 100755 vendor/github.com/docker/docker/profiles/seccomp/default.json create mode 100644 vendor/github.com/docker/docker/profiles/seccomp/generate.go create mode 100644 vendor/github.com/docker/docker/profiles/seccomp/seccomp.go create mode 100644 vendor/github.com/docker/docker/profiles/seccomp/seccomp_default.go create mode 100644 vendor/github.com/docker/docker/profiles/seccomp/seccomp_unsupported.go create mode 100644 vendor/github.com/docker/docker/restartmanager/restartmanager.go create mode 100644 vendor/github.com/docker/docker/runconfig/compare.go create mode 100644 vendor/github.com/docker/docker/runconfig/config.go create mode 100644 vendor/github.com/docker/docker/runconfig/config_unix.go create mode 100644 vendor/github.com/docker/docker/runconfig/errors.go create mode 100644 vendor/github.com/docker/docker/runconfig/hostconfig.go create mode 100644 vendor/github.com/docker/docker/runconfig/hostconfig_unix.go create mode 100644 vendor/github.com/docker/docker/runconfig/streams.go create mode 100644 
vendor/github.com/docker/docker/utils/debug.go create mode 100644 vendor/github.com/docker/docker/utils/experimental.go create mode 100644 vendor/github.com/docker/docker/utils/names.go create mode 100644 vendor/github.com/docker/docker/utils/process_unix.go create mode 100644 vendor/github.com/docker/docker/utils/stubs.go create mode 100644 vendor/github.com/docker/docker/utils/templates/templates.go create mode 100644 vendor/github.com/docker/docker/utils/utils.go create mode 100644 vendor/github.com/docker/docker/volume/drivers/adapter.go create mode 100644 vendor/github.com/docker/docker/volume/drivers/extpoint.go create mode 100644 vendor/github.com/docker/docker/volume/drivers/proxy.go create mode 100644 vendor/github.com/docker/docker/volume/local/local.go create mode 100644 vendor/github.com/docker/docker/volume/local/local_unix.go create mode 100644 vendor/github.com/docker/docker/volume/store/errors.go create mode 100644 vendor/github.com/docker/docker/volume/store/store.go create mode 100644 vendor/github.com/docker/docker/volume/store/store_unix.go create mode 100644 vendor/github.com/docker/docker/volume/volume.go create mode 100644 vendor/github.com/docker/docker/volume/volume_copy.go create mode 100644 vendor/github.com/docker/docker/volume/volume_propagation_linux.go create mode 100644 vendor/github.com/docker/docker/volume/volume_propagation_unsupported.go create mode 100644 vendor/github.com/docker/docker/volume/volume_unix.go create mode 100644 vendor/github.com/docker/engine-api/types/events/events.go create mode 100644 vendor/github.com/docker/engine-api/types/versions/README.md create mode 100644 vendor/github.com/docker/engine-api/types/versions/v1p19/types.go create mode 100644 vendor/github.com/docker/engine-api/types/versions/v1p20/types.go create mode 100644 vendor/github.com/docker/libtrust/CONTRIBUTING.md create mode 100644 vendor/github.com/docker/libtrust/LICENSE create mode 100644 vendor/github.com/docker/libtrust/MAINTAINERS create mode 100644 vendor/github.com/docker/libtrust/README.md create mode 100644 vendor/github.com/docker/libtrust/certificates.go create mode 100644 vendor/github.com/docker/libtrust/doc.go create mode 100644 vendor/github.com/docker/libtrust/ec_key.go create mode 100644 vendor/github.com/docker/libtrust/filter.go create mode 100644 vendor/github.com/docker/libtrust/hash.go create mode 100644 vendor/github.com/docker/libtrust/jsonsign.go create mode 100644 vendor/github.com/docker/libtrust/key.go create mode 100644 vendor/github.com/docker/libtrust/key_files.go create mode 100644 vendor/github.com/docker/libtrust/key_manager.go create mode 100644 vendor/github.com/docker/libtrust/rsa_key.go create mode 100644 vendor/github.com/docker/libtrust/util.go create mode 100644 vendor/github.com/godbus/dbus/CONTRIBUTING.md create mode 100644 vendor/github.com/godbus/dbus/LICENSE create mode 100644 vendor/github.com/godbus/dbus/MAINTAINERS create mode 100644 vendor/github.com/godbus/dbus/README.markdown create mode 100644 vendor/github.com/godbus/dbus/auth.go create mode 100644 vendor/github.com/godbus/dbus/auth_external.go create mode 100644 vendor/github.com/godbus/dbus/auth_sha1.go create mode 100644 vendor/github.com/godbus/dbus/call.go create mode 100644 vendor/github.com/godbus/dbus/conn.go create mode 100644 vendor/github.com/godbus/dbus/conn_darwin.go create mode 100644 vendor/github.com/godbus/dbus/conn_other.go create mode 100644 vendor/github.com/godbus/dbus/dbus.go create mode 100644 vendor/github.com/godbus/dbus/decoder.go create 
mode 100644 vendor/github.com/godbus/dbus/doc.go create mode 100644 vendor/github.com/godbus/dbus/encoder.go create mode 100644 vendor/github.com/godbus/dbus/export.go create mode 100644 vendor/github.com/godbus/dbus/homedir.go create mode 100644 vendor/github.com/godbus/dbus/homedir_dynamic.go create mode 100644 vendor/github.com/godbus/dbus/homedir_static.go create mode 100644 vendor/github.com/godbus/dbus/message.go create mode 100644 vendor/github.com/godbus/dbus/object.go create mode 100644 vendor/github.com/godbus/dbus/sig.go create mode 100644 vendor/github.com/godbus/dbus/transport_darwin.go create mode 100644 vendor/github.com/godbus/dbus/transport_generic.go create mode 100644 vendor/github.com/godbus/dbus/transport_unix.go create mode 100644 vendor/github.com/godbus/dbus/transport_unixcred_dragonfly.go create mode 100644 vendor/github.com/godbus/dbus/transport_unixcred_linux.go create mode 100644 vendor/github.com/godbus/dbus/variant.go create mode 100644 vendor/github.com/godbus/dbus/variant_lexer.go create mode 100644 vendor/github.com/godbus/dbus/variant_parser.go create mode 100644 vendor/github.com/golang/protobuf/.gitignore create mode 100644 vendor/github.com/golang/protobuf/AUTHORS create mode 100644 vendor/github.com/golang/protobuf/CONTRIBUTORS create mode 100644 vendor/github.com/golang/protobuf/LICENSE create mode 100644 vendor/github.com/golang/protobuf/Make.protobuf create mode 100644 vendor/github.com/golang/protobuf/Makefile create mode 100644 vendor/github.com/golang/protobuf/README.md create mode 100644 vendor/github.com/golang/protobuf/proto/Makefile create mode 100644 vendor/github.com/golang/protobuf/proto/clone.go create mode 100644 vendor/github.com/golang/protobuf/proto/decode.go create mode 100644 vendor/github.com/golang/protobuf/proto/encode.go create mode 100644 vendor/github.com/golang/protobuf/proto/equal.go create mode 100644 vendor/github.com/golang/protobuf/proto/extensions.go create mode 100644 vendor/github.com/golang/protobuf/proto/lib.go create mode 100644 vendor/github.com/golang/protobuf/proto/message_set.go create mode 100644 vendor/github.com/golang/protobuf/proto/pointer_reflect.go create mode 100644 vendor/github.com/golang/protobuf/proto/pointer_unsafe.go create mode 100644 vendor/github.com/golang/protobuf/proto/properties.go create mode 100644 vendor/github.com/golang/protobuf/proto/text.go create mode 100644 vendor/github.com/golang/protobuf/proto/text_parser.go create mode 100644 vendor/github.com/imdario/mergo/.travis.yml create mode 100644 vendor/github.com/imdario/mergo/LICENSE create mode 100644 vendor/github.com/imdario/mergo/README.md create mode 100644 vendor/github.com/imdario/mergo/doc.go create mode 100644 vendor/github.com/imdario/mergo/map.go create mode 100644 vendor/github.com/imdario/mergo/merge.go create mode 100644 vendor/github.com/imdario/mergo/mergo.go create mode 100644 vendor/github.com/mattn/go-shellwords/.travis.yml create mode 100644 vendor/github.com/mattn/go-shellwords/README.md create mode 100644 vendor/github.com/mattn/go-shellwords/shellwords.go create mode 100644 vendor/github.com/mattn/go-shellwords/util_posix.go create mode 100644 vendor/github.com/mattn/go-shellwords/util_windows.go create mode 100644 vendor/github.com/opencontainers/runc/checkpoint.go create mode 100644 vendor/github.com/opencontainers/runc/delete.go create mode 100644 vendor/github.com/opencontainers/runc/events.go create mode 100644 vendor/github.com/opencontainers/runc/exec.go create mode 100644 
vendor/github.com/opencontainers/runc/kill.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/apparmor/apparmor.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/apparmor/apparmor_disabled.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/capabilities_linux.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups_unsupported.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/apply_raw.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/blkio.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpu.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpuacct.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpuset.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/devices.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/freezer.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/fs_unsupported.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/hugetlb.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/memory.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/name.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/net_cls.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/net_prio.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/perf_event.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/pids.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/utils.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/cgroups/stats.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/apply_nosystemd.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/apply_systemd.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/cgroups/utils.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/compat_1.5_linux.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/configs/blkio_device.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_unix.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_unsupported.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_windows.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/configs/config.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/configs/config_unix.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/configs/device.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/configs/device_defaults.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/configs/hugepage_limit.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/configs/interface_priority_map.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/configs/mount.go create mode 100644 
vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall_unsupported.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_unix.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_unsupported.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/configs/network.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/configs/validate/validator.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/console.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/console_freebsd.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/console_linux.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/console_windows.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/container.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/container_linux.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/container_windows.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/criu_opts_unix.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/criu_opts_windows.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/criurpc/Makefile create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/criurpc/criurpc.pb.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/criurpc/criurpc.proto create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/devices/devices_unix.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/devices/devices_unsupported.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/devices/number.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/error.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/factory.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/factory_linux.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/generic_error.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/init_linux.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/keys/keyctl.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/label/label.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/label/label_selinux.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/message_linux.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/network_linux.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/notify_linux.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/nsenter/README.md create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsenter.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsenter_gccgo.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsenter_unsupported.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsexec.c create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/process.go create mode 100644 
vendor/github.com/opencontainers/runc/libcontainer/process_linux.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/restored_process.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/rootfs_linux.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/seccomp/config.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/seccomp/seccomp_linux.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/seccomp/seccomp_unsupported.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/selinux/selinux.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/setgroups_linux.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/setns_init_linux.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/specconv/spec_linux.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/stacktrace/capture.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/stacktrace/frame.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/stacktrace/stacktrace.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/standard_init_linux.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/state_linux.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/stats.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/stats_freebsd.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/stats_linux.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/stats_windows.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/system/linux.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/system/proc.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/system/setns_linux.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_386.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_64.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_arm.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/system/sysconfig.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/system/sysconfig_notcgo.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/system/unsupported.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/system/xattrs_linux.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/utils/utils.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/utils/utils_unix.go create mode 100644 vendor/github.com/opencontainers/runc/list.go create mode 100644 vendor/github.com/opencontainers/runc/main.go create mode 100644 vendor/github.com/opencontainers/runc/main_unix.go create mode 100644 vendor/github.com/opencontainers/runc/main_unsupported.go create mode 100644 vendor/github.com/opencontainers/runc/pause.go create mode 100644 vendor/github.com/opencontainers/runc/restore.go create mode 100644 vendor/github.com/opencontainers/runc/rlimit_linux.go create mode 100644 vendor/github.com/opencontainers/runc/signals.go create mode 100644 vendor/github.com/opencontainers/runc/spec.go create mode 100644 vendor/github.com/opencontainers/runc/start.go create mode 100644 vendor/github.com/opencontainers/runc/state.go create mode 100644 
vendor/github.com/opencontainers/runc/tty.go create mode 100644 vendor/github.com/opencontainers/runc/utils.go create mode 100644 vendor/github.com/opencontainers/runtime-spec/.gitignore create mode 100644 vendor/github.com/opencontainers/runtime-spec/.travis.yml create mode 100644 vendor/github.com/opencontainers/runtime-spec/ChangeLog create mode 100644 vendor/github.com/opencontainers/runtime-spec/LICENSE create mode 100644 vendor/github.com/opencontainers/runtime-spec/MAINTAINERS create mode 100644 vendor/github.com/opencontainers/runtime-spec/Makefile create mode 100644 vendor/github.com/opencontainers/runtime-spec/README.md create mode 100644 vendor/github.com/opencontainers/runtime-spec/ROADMAP.md create mode 100644 vendor/github.com/opencontainers/runtime-spec/bundle.md create mode 100644 vendor/github.com/opencontainers/runtime-spec/code-of-conduct.md create mode 100644 vendor/github.com/opencontainers/runtime-spec/config-linux.md create mode 100644 vendor/github.com/opencontainers/runtime-spec/config.md create mode 100644 vendor/github.com/opencontainers/runtime-spec/glossary.md create mode 100644 vendor/github.com/opencontainers/runtime-spec/implementations.md create mode 100644 vendor/github.com/opencontainers/runtime-spec/principles.md create mode 100644 vendor/github.com/opencontainers/runtime-spec/project.md create mode 100644 vendor/github.com/opencontainers/runtime-spec/runtime-linux.md create mode 100644 vendor/github.com/opencontainers/runtime-spec/runtime.md create mode 100644 vendor/github.com/opencontainers/runtime-spec/specs-go/config.go create mode 100644 vendor/github.com/opencontainers/runtime-spec/specs-go/state.go create mode 100644 vendor/github.com/opencontainers/runtime-spec/specs-go/version.go create mode 100644 vendor/github.com/opencontainers/runtime-spec/style.md create mode 100644 vendor/github.com/opencontainers/specs/.gitignore create mode 100644 vendor/github.com/opencontainers/specs/.travis.yml create mode 100644 vendor/github.com/opencontainers/specs/ChangeLog create mode 100644 vendor/github.com/opencontainers/specs/LICENSE create mode 100644 vendor/github.com/opencontainers/specs/MAINTAINERS create mode 100644 vendor/github.com/opencontainers/specs/Makefile create mode 100644 vendor/github.com/opencontainers/specs/README.md create mode 100644 vendor/github.com/opencontainers/specs/ROADMAP.md create mode 100644 vendor/github.com/opencontainers/specs/bundle.md create mode 100644 vendor/github.com/opencontainers/specs/code-of-conduct.md create mode 100644 vendor/github.com/opencontainers/specs/config-linux.md create mode 100644 vendor/github.com/opencontainers/specs/config.md create mode 100644 vendor/github.com/opencontainers/specs/glossary.md create mode 100644 vendor/github.com/opencontainers/specs/implementations.md create mode 100644 vendor/github.com/opencontainers/specs/principles.md create mode 100644 vendor/github.com/opencontainers/specs/project.md create mode 100644 vendor/github.com/opencontainers/specs/runtime-linux.md create mode 100644 vendor/github.com/opencontainers/specs/runtime.md create mode 100644 vendor/github.com/opencontainers/specs/specs-go/config.go create mode 100644 vendor/github.com/opencontainers/specs/specs-go/state.go create mode 100644 vendor/github.com/opencontainers/specs/specs-go/version.go create mode 100644 vendor/github.com/opencontainers/specs/style.md create mode 100644 vendor/github.com/rcrowley/go-metrics/.gitignore create mode 100644 vendor/github.com/rcrowley/go-metrics/.travis.yml create mode 100644 
vendor/github.com/rcrowley/go-metrics/LICENSE create mode 100644 vendor/github.com/rcrowley/go-metrics/README.md create mode 100644 vendor/github.com/rcrowley/go-metrics/counter.go create mode 100644 vendor/github.com/rcrowley/go-metrics/debug.go create mode 100644 vendor/github.com/rcrowley/go-metrics/ewma.go create mode 100644 vendor/github.com/rcrowley/go-metrics/gauge.go create mode 100644 vendor/github.com/rcrowley/go-metrics/gauge_float64.go create mode 100644 vendor/github.com/rcrowley/go-metrics/graphite.go create mode 100644 vendor/github.com/rcrowley/go-metrics/healthcheck.go create mode 100644 vendor/github.com/rcrowley/go-metrics/histogram.go create mode 100644 vendor/github.com/rcrowley/go-metrics/json.go create mode 100644 vendor/github.com/rcrowley/go-metrics/log.go create mode 100644 vendor/github.com/rcrowley/go-metrics/memory.md create mode 100644 vendor/github.com/rcrowley/go-metrics/meter.go create mode 100644 vendor/github.com/rcrowley/go-metrics/metrics.go create mode 100644 vendor/github.com/rcrowley/go-metrics/opentsdb.go create mode 100644 vendor/github.com/rcrowley/go-metrics/registry.go create mode 100644 vendor/github.com/rcrowley/go-metrics/runtime.go create mode 100644 vendor/github.com/rcrowley/go-metrics/runtime_cgo.go create mode 100644 vendor/github.com/rcrowley/go-metrics/runtime_gccpufraction.go create mode 100644 vendor/github.com/rcrowley/go-metrics/runtime_no_cgo.go create mode 100644 vendor/github.com/rcrowley/go-metrics/runtime_no_gccpufraction.go create mode 100644 vendor/github.com/rcrowley/go-metrics/sample.go create mode 100644 vendor/github.com/rcrowley/go-metrics/syslog.go create mode 100644 vendor/github.com/rcrowley/go-metrics/timer.go create mode 100755 vendor/github.com/rcrowley/go-metrics/validate.sh create mode 100644 vendor/github.com/rcrowley/go-metrics/writer.go create mode 100644 vendor/github.com/seccomp/libseccomp-golang/LICENSE create mode 100644 vendor/github.com/seccomp/libseccomp-golang/README create mode 100644 vendor/github.com/seccomp/libseccomp-golang/seccomp.go create mode 100644 vendor/github.com/seccomp/libseccomp-golang/seccomp_internal.go create mode 100644 vendor/github.com/syndtr/gocapability/LICENSE create mode 100644 vendor/github.com/syndtr/gocapability/capability/capability.go create mode 100644 vendor/github.com/syndtr/gocapability/capability/capability_linux.go create mode 100644 vendor/github.com/syndtr/gocapability/capability/capability_noop.go create mode 100644 vendor/github.com/syndtr/gocapability/capability/enum.go create mode 100644 vendor/github.com/syndtr/gocapability/capability/enum_gen.go create mode 100644 vendor/github.com/syndtr/gocapability/capability/syscall_linux.go create mode 100644 vendor/github.com/tchap/go-patricia/.gitignore create mode 100644 vendor/github.com/tchap/go-patricia/AUTHORS create mode 100644 vendor/github.com/tchap/go-patricia/LICENSE create mode 100644 vendor/github.com/tchap/go-patricia/README.md create mode 100644 vendor/github.com/tchap/go-patricia/patricia/children.go create mode 100644 vendor/github.com/tchap/go-patricia/patricia/patricia.go create mode 100644 vendor/golang.org/x/net/http2/.gitignore create mode 100644 vendor/golang.org/x/net/http2/Dockerfile create mode 100644 vendor/golang.org/x/net/http2/Makefile create mode 100644 vendor/golang.org/x/net/http2/README create mode 100644 vendor/golang.org/x/net/http2/client_conn_pool.go create mode 100644 vendor/golang.org/x/net/http2/configure_transport.go create mode 100644 vendor/golang.org/x/net/http2/errors.go 
create mode 100644 vendor/golang.org/x/net/http2/fixed_buffer.go create mode 100644 vendor/golang.org/x/net/http2/flow.go create mode 100644 vendor/golang.org/x/net/http2/frame.go create mode 100644 vendor/golang.org/x/net/http2/go15.go create mode 100644 vendor/golang.org/x/net/http2/gotrack.go create mode 100644 vendor/golang.org/x/net/http2/headermap.go create mode 100644 vendor/golang.org/x/net/http2/hpack/encode.go create mode 100644 vendor/golang.org/x/net/http2/hpack/hpack.go create mode 100644 vendor/golang.org/x/net/http2/hpack/huffman.go create mode 100644 vendor/golang.org/x/net/http2/hpack/tables.go create mode 100644 vendor/golang.org/x/net/http2/http2.go create mode 100644 vendor/golang.org/x/net/http2/not_go15.go create mode 100644 vendor/golang.org/x/net/http2/not_go16.go create mode 100644 vendor/golang.org/x/net/http2/pipe.go create mode 100644 vendor/golang.org/x/net/http2/server.go create mode 100644 vendor/golang.org/x/net/http2/transport.go create mode 100644 vendor/golang.org/x/net/http2/write.go create mode 100644 vendor/golang.org/x/net/http2/writesched.go create mode 100644 vendor/golang.org/x/net/internal/timeseries/timeseries.go create mode 100644 vendor/golang.org/x/net/trace/events.go create mode 100644 vendor/golang.org/x/net/trace/histogram.go create mode 100644 vendor/golang.org/x/net/trace/trace.go create mode 100644 vendor/golang.org/x/net/websocket/client.go create mode 100644 vendor/golang.org/x/net/websocket/hybi.go create mode 100644 vendor/golang.org/x/net/websocket/server.go create mode 100644 vendor/golang.org/x/net/websocket/websocket.go create mode 100644 vendor/google.golang.org/grpc/.travis.yml create mode 100644 vendor/google.golang.org/grpc/CONTRIBUTING.md create mode 100644 vendor/google.golang.org/grpc/LICENSE create mode 100644 vendor/google.golang.org/grpc/Makefile create mode 100644 vendor/google.golang.org/grpc/PATENTS create mode 100644 vendor/google.golang.org/grpc/README.md create mode 100644 vendor/google.golang.org/grpc/backoff.go create mode 100644 vendor/google.golang.org/grpc/call.go create mode 100644 vendor/google.golang.org/grpc/clientconn.go create mode 100755 vendor/google.golang.org/grpc/codegen.sh create mode 100644 vendor/google.golang.org/grpc/codes/code_string.go create mode 100644 vendor/google.golang.org/grpc/codes/codes.go create mode 100755 vendor/google.golang.org/grpc/coverage.sh create mode 100644 vendor/google.golang.org/grpc/credentials/credentials.go create mode 100644 vendor/google.golang.org/grpc/doc.go create mode 100644 vendor/google.golang.org/grpc/grpclog/logger.go create mode 100644 vendor/google.golang.org/grpc/interceptor.go create mode 100644 vendor/google.golang.org/grpc/internal/internal.go create mode 100644 vendor/google.golang.org/grpc/metadata/metadata.go create mode 100644 vendor/google.golang.org/grpc/naming/naming.go create mode 100644 vendor/google.golang.org/grpc/peer/peer.go create mode 100644 vendor/google.golang.org/grpc/picker.go create mode 100644 vendor/google.golang.org/grpc/rpc_util.go create mode 100644 vendor/google.golang.org/grpc/server.go create mode 100644 vendor/google.golang.org/grpc/stream.go create mode 100644 vendor/google.golang.org/grpc/trace.go create mode 100644 vendor/google.golang.org/grpc/transport/control.go create mode 100644 vendor/google.golang.org/grpc/transport/handler_server.go create mode 100644 vendor/google.golang.org/grpc/transport/http2_client.go create mode 100644 vendor/google.golang.org/grpc/transport/http2_server.go create mode 100644 
vendor/google.golang.org/grpc/transport/http_util.go create mode 100644 vendor/google.golang.org/grpc/transport/transport.go create mode 100644 vendor/gopkg.in/fsnotify.v1/.gitignore create mode 100644 vendor/gopkg.in/fsnotify.v1/.travis.yml create mode 100644 vendor/gopkg.in/fsnotify.v1/AUTHORS create mode 100644 vendor/gopkg.in/fsnotify.v1/CHANGELOG.md create mode 100644 vendor/gopkg.in/fsnotify.v1/CONTRIBUTING.md create mode 100644 vendor/gopkg.in/fsnotify.v1/LICENSE create mode 100644 vendor/gopkg.in/fsnotify.v1/NotUsed.xcworkspace create mode 100644 vendor/gopkg.in/fsnotify.v1/README.md create mode 100644 vendor/gopkg.in/fsnotify.v1/circle.yml create mode 100644 vendor/gopkg.in/fsnotify.v1/fsnotify.go create mode 100644 vendor/gopkg.in/fsnotify.v1/inotify.go create mode 100644 vendor/gopkg.in/fsnotify.v1/inotify_poller.go create mode 100644 vendor/gopkg.in/fsnotify.v1/kqueue.go create mode 100644 vendor/gopkg.in/fsnotify.v1/open_mode_bsd.go create mode 100644 vendor/gopkg.in/fsnotify.v1/open_mode_darwin.go create mode 100644 vendor/gopkg.in/fsnotify.v1/windows.go diff --git a/trash.conf b/trash.conf index 52f1e194..a8ea6ae6 100644 --- a/trash.conf +++ b/trash.conf @@ -9,9 +9,9 @@ github.com/coreos/coreos-cloudinit v1.11.0-3-gb1c1753 https://github.com/rancher github.com/coreos/go-systemd v4 github.com/coreos/yaml 6b16a5714269b2f70720a45406b1babd947a17ef github.com/davecgh/go-spew 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d -github.com/docker/containerd ebb6d97f443fdcbf7084e41356359c99421d93f5 https://github.com/ibuildthecloud/containerd.git +github.com/docker/containerd 8c538d6b92f2c512c3ad8a854826461ac9095b74 https://github.com/ibuildthecloud/containerd.git github.com/docker/distribution 467fc068d88aa6610691b7f1a677271a3fac4aac -github.com/docker/docker system-docker-1.11.1 https://github.com/ibuildthecloud/docker.git +github.com/docker/docker 8ba9ee769ba6c451e1d2abf05368580323201667 https://github.com/rancher/docker.git github.com/docker/engine-api v0.3.3 github.com/docker/go-connections v0.2.0 github.com/docker/go-units 651fc226e7441360384da338d0fd37f2440ffbe3 @@ -49,6 +49,5 @@ github.com/xeipuuv/gojsonschema ac452913faa25c08bb78810d3e6f88b8a39f8f25 golang.org/x/crypto 2f3083f6163ef51179ad42ed523a18c9a1141467 golang.org/x/net 991d3e32f76f19ee6d9caadb3a22eae8d23315f7 https://github.com/golang/net.git golang.org/x/sys eb2c74142fd19a79b3f237334c7384d5167b1b46 https://github.com/golang/sys.git -google.golang.org/cloud dae7e3d993bc3812a2185af60552bb6b847e52a0 https://code.googlesource.com/gocloud google.golang.org/grpc ab0be5212fb225475f2087566eded7da5d727960 https://github.com/grpc/grpc-go.git gopkg.in/fsnotify.v1 v1.2.0 diff --git a/vendor/github.com/RackSec/srslog/.gitignore b/vendor/github.com/RackSec/srslog/.gitignore new file mode 100644 index 00000000..ebf0f2e4 --- /dev/null +++ b/vendor/github.com/RackSec/srslog/.gitignore @@ -0,0 +1 @@ +.cover diff --git a/vendor/github.com/RackSec/srslog/.travis.yml b/vendor/github.com/RackSec/srslog/.travis.yml new file mode 100644 index 00000000..4e5c4f07 --- /dev/null +++ b/vendor/github.com/RackSec/srslog/.travis.yml @@ -0,0 +1,18 @@ +sudo: required +dist: trusty +group: edge +language: go +go: +- 1.5 +before_install: + - pip install --user codecov +script: +- | + go get ./... 
+ go test -v -coverprofile=coverage.txt -covermode=atomic + go vet +after_success: + - codecov +notifications: + slack: + secure: dtDue9gP6CRR1jYjEf6raXXFak3QKGcCFvCf5mfvv5XScdpmc3udwgqc5TdyjC0goaC9OK/4jTcCD30dYZm/u6ux3E9mo3xwMl2xRLHx76p5r9rSQtloH19BDwA2+A+bpDfFQVz05k2YXuTiGSvNMMdwzx+Dr294Sl/z43RFB4+b9/R/6LlFpRW89IwftvpLAFnBy4K/ZcspQzKM+rQfQTL5Kk+iZ/KBsuR/VziDq6MoJ8t43i4ee8vwS06vFBKDbUiZ4FIZpLgc2RAL5qso5aWRKYXL6waXfoKHZWKPe0w4+9IY1rDJxG1jEb7YGgcbLaF9xzPRRs2b2yO/c87FKpkh6PDgYHfLjpgXotCoojZrL4p1x6MI1ldJr3NhARGPxS9r4liB9n6Y5nD+ErXi1IMf55fuUHcPY27Jc0ySeLFeM6cIWJ8OhFejCgGw6a5DnnmJo0PqopsaBDHhadpLejT1+K6bL2iGkT4SLcVNuRGLs+VyuNf1+5XpkWZvy32vquO7SZOngLLBv+GIem+t3fWm0Z9s/0i1uRCQei1iUutlYjoV/LBd35H2rhob4B5phIuJin9kb0zbHf6HnaoN0CtN8r0d8G5CZiInVlG5Xcid5Byb4dddf5U2EJTDuCMVyyiM7tcnfjqw9UbVYNxtYM9SzcqIq+uVqM8pYL9xSec= diff --git a/vendor/github.com/RackSec/srslog/CODE_OF_CONDUCT.md b/vendor/github.com/RackSec/srslog/CODE_OF_CONDUCT.md new file mode 100644 index 00000000..18ac49fc --- /dev/null +++ b/vendor/github.com/RackSec/srslog/CODE_OF_CONDUCT.md @@ -0,0 +1,50 @@ +# Contributor Code of Conduct + +As contributors and maintainers of this project, and in the interest of +fostering an open and welcoming community, we pledge to respect all people who +contribute through reporting issues, posting feature requests, updating +documentation, submitting pull requests or patches, and other activities. + +We are committed to making participation in this project a harassment-free +experience for everyone, regardless of level of experience, gender, gender +identity and expression, sexual orientation, disability, personal appearance, +body size, race, ethnicity, age, religion, or nationality. + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery +* Personal attacks +* Trolling or insulting/derogatory comments +* Public or private harassment +* Publishing other's private information, such as physical or electronic + addresses, without explicit permission +* Other unethical or unprofessional conduct + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +By adopting this Code of Conduct, project maintainers commit themselves to +fairly and consistently applying these principles to every aspect of managing +this project. Project maintainers who do not follow or enforce the Code of +Conduct may be permanently removed from the project team. + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting a project maintainer at [sirsean@gmail.com]. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. Maintainers are +obligated to maintain confidentiality with regard to the reporter of an +incident. 
+ + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], +version 1.3.0, available at +[http://contributor-covenant.org/version/1/3/0/][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/3/0/ diff --git a/vendor/github.com/RackSec/srslog/LICENSE b/vendor/github.com/RackSec/srslog/LICENSE new file mode 100644 index 00000000..9269338f --- /dev/null +++ b/vendor/github.com/RackSec/srslog/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2015 Rackspace. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/RackSec/srslog/README.md b/vendor/github.com/RackSec/srslog/README.md new file mode 100644 index 00000000..1ae1fd4e --- /dev/null +++ b/vendor/github.com/RackSec/srslog/README.md @@ -0,0 +1,131 @@ +[![Build Status](https://travis-ci.org/RackSec/srslog.svg?branch=master)](https://travis-ci.org/RackSec/srslog) + +# srslog + +Go has a `syslog` package in the standard library, but it has the following +shortcomings: + +1. It doesn't have TLS support +2. [According to bradfitz on the Go team, it is no longer being maintained.](https://github.com/golang/go/issues/13449#issuecomment-161204716) + +I agree that it doesn't need to be in the standard library. So, I've +followed Brad's suggestion and have made a separate project to handle syslog. + +This code was taken directly from the Go project as a base to start from. + +However, this _does_ have TLS support. + +# Usage + +Basic usage retains the same interface as the original `syslog` package. We +only added to the interface where required to support new functionality. 
+ +Switch from the standard library: + +``` +import ( + //"log/syslog" + syslog "github.com/RackSec/srslog" +) +``` + +You can still use it for local syslog: + +``` +w, err := syslog.Dial("", "", syslog.LOG_ERR, "testtag") +``` + +Or to unencrypted UDP: + +``` +w, err := syslog.Dial("udp", "192.168.0.50:514", syslog.LOG_ERR, "testtag") +``` + +Or to unencrypted TCP: + +``` +w, err := syslog.Dial("tcp", "192.168.0.51:514", syslog.LOG_ERR, "testtag") +``` + +But now you can also send messages via TLS-encrypted TCP: + +``` +w, err := syslog.DialWithTLSCertPath("tcp+tls", "192.168.0.52:514", syslog.LOG_ERR, "testtag", "/path/to/servercert.pem") +``` + +And if you need more control over your TLS configuration: + +``` +pool := x509.NewCertPool() +serverCert, err := ioutil.ReadFile("/path/to/servercert.pem") +if err != nil { + return nil, err +} +pool.AppendCertsFromPEM(serverCert) +config := tls.Config{ + RootCAs: pool, +} + +w, err := syslog.DialWithTLSConfig(network, raddr, priority, tag, &config) +``` + +(Note that in both TLS cases, this uses a self-signed certificate, where the +remote syslog server has the keypair and the client has only the public key.) + +And then to write log messages, continue like so: + +``` +if err != nil { + log.Fatal("failed to connect to syslog:", err) +} +defer w.Close() + +w.Alert("this is an alert") +w.Crit("this is critical") +w.Err("this is an error") +w.Warning("this is a warning") +w.Notice("this is a notice") +w.Info("this is info") +w.Debug("this is debug") +w.Write([]byte("these are some bytes")) +``` + +# Generating TLS Certificates + +We've provided a script that you can use to generate a self-signed keypair: + +``` +pip install cryptography +python script/gen-certs.py +``` + +That outputs the public key and private key to standard out. Put those into +`.pem` files. (And don't put them into any source control. The certificate in +the `test` directory is used by the unit tests, and please do not actually use +it anywhere else.) + +# Running Tests + +Run the tests as usual: + +``` +go test +``` + +But we've also provided a test coverage script that will show you which +lines of code are not covered: + +``` +script/coverage --html +``` + +That will open a new browser tab showing coverage information. + +# License + +This project uses the New BSD License, the same as the Go project itself. + +# Code of Conduct + +Please note that this project is released with a Contributor Code of Conduct. +By participating in this project you agree to abide by its terms. diff --git a/vendor/github.com/RackSec/srslog/constants.go b/vendor/github.com/RackSec/srslog/constants.go new file mode 100644 index 00000000..600801ee --- /dev/null +++ b/vendor/github.com/RackSec/srslog/constants.go @@ -0,0 +1,68 @@ +package srslog + +import ( + "errors" +) + +// Priority is a combination of the syslog facility and +// severity. For example, LOG_ALERT | LOG_FTP sends an alert severity +// message from the FTP facility. The default severity is LOG_EMERG; +// the default facility is LOG_KERN. +type Priority int + +const severityMask = 0x07 +const facilityMask = 0xf8 + +const ( + // Severity. + + // From /usr/include/sys/syslog.h. + // These are the same on Linux, BSD, and OS X. + LOG_EMERG Priority = iota + LOG_ALERT + LOG_CRIT + LOG_ERR + LOG_WARNING + LOG_NOTICE + LOG_INFO + LOG_DEBUG +) + +const ( + // Facility. + + // From /usr/include/sys/syslog.h. + // These are the same up to LOG_FTP on Linux, BSD, and OS X.
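+	// Combining a facility with a severity is plain bit arithmetic: for
+	// example, LOG_FTP|LOG_ALERT = (11<<3)|1 = 89, and the two halves can
+	// be recovered with p&facilityMask and p&severityMask.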
+ LOG_KERN Priority = iota << 3 + LOG_USER + LOG_MAIL + LOG_DAEMON + LOG_AUTH + LOG_SYSLOG + LOG_LPR + LOG_NEWS + LOG_UUCP + LOG_CRON + LOG_AUTHPRIV + LOG_FTP + _ // unused + _ // unused + _ // unused + _ // unused + LOG_LOCAL0 + LOG_LOCAL1 + LOG_LOCAL2 + LOG_LOCAL3 + LOG_LOCAL4 + LOG_LOCAL5 + LOG_LOCAL6 + LOG_LOCAL7 +) + +func validatePriority(p Priority) error { + if p < 0 || p > LOG_LOCAL7|LOG_DEBUG { + return errors.New("log/syslog: invalid priority") + } else { + return nil + } +} diff --git a/vendor/github.com/RackSec/srslog/dialer.go b/vendor/github.com/RackSec/srslog/dialer.go new file mode 100644 index 00000000..47a7b2be --- /dev/null +++ b/vendor/github.com/RackSec/srslog/dialer.go @@ -0,0 +1,87 @@ +package srslog + +import ( + "crypto/tls" + "net" +) + +// dialerFunctionWrapper is a simple object that consists of a dialer function +// and its name. This is primarily for testing, so we can make sure that the +// getDialer method returns the correct dialer function. However, if you ever +// find that you need to check which dialer function you have, this would also +// be useful for you without having to use reflection. +type dialerFunctionWrapper struct { + Name string + Dialer func() (serverConn, string, error) +} + +// Call the wrapped dialer function and return its return values. +func (df dialerFunctionWrapper) Call() (serverConn, string, error) { + return df.Dialer() +} + +// getDialer returns a "dialer" function that can be called to connect to a +// syslog server. +// +// Each dialer function is responsible for dialing the remote host and returns +// a serverConn, the hostname (or a default if the Writer has not specified a +// hostname), and an error in case dialing fails. +// +// The reason for separate dialers is that different network types may need +// to dial their connection differently, yet still provide a net.Conn interface +// that you can use once they have dialed. Rather than an increasingly long +// conditional, we have a map of network -> dialer function (with a sane default +// value), and adding a new network type is as easy as writing the dialer +// function and adding it to the map. +func (w *Writer) getDialer() dialerFunctionWrapper { + dialers := map[string]dialerFunctionWrapper{ + "": dialerFunctionWrapper{"unixDialer", w.unixDialer}, + "tcp+tls": dialerFunctionWrapper{"tlsDialer", w.tlsDialer}, + } + dialer, ok := dialers[w.network] + if !ok { + dialer = dialerFunctionWrapper{"basicDialer", w.basicDialer} + } + return dialer +} + +// unixDialer uses the unixSyslog method to open a connection to the syslog +// daemon running on the local machine. +func (w *Writer) unixDialer() (serverConn, string, error) { + sc, err := unixSyslog() + hostname := w.hostname + if hostname == "" { + hostname = "localhost" + } + return sc, hostname, err +} + +// tlsDialer connects to TLS over TCP, and is used for the "tcp+tls" network +// type. +func (w *Writer) tlsDialer() (serverConn, string, error) { + c, err := tls.Dial("tcp", w.raddr, w.tlsConfig) + var sc serverConn + hostname := w.hostname + if err == nil { + sc = &netConn{conn: c} + if hostname == "" { + hostname = c.LocalAddr().String() + } + } + return sc, hostname, err +} + +// basicDialer is the most common dialer for syslog, and supports both TCP and +// UDP connections. 
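+// For example, the README's Dial("udp", "192.168.0.50:514", ...) and
+// Dial("tcp", "192.168.0.51:514", ...) calls both end up here, since only
+// "" and "tcp+tls" have dedicated entries in the dialer map.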
+func (w *Writer) basicDialer() (serverConn, string, error) {
+	c, err := net.Dial(w.network, w.raddr)
+	var sc serverConn
+	hostname := w.hostname
+	if err == nil {
+		sc = &netConn{conn: c}
+		if hostname == "" {
+			hostname = c.LocalAddr().String()
+		}
+	}
+	return sc, hostname, err
+}
diff --git a/vendor/github.com/RackSec/srslog/formatter.go b/vendor/github.com/RackSec/srslog/formatter.go
new file mode 100644
index 00000000..2a746251
--- /dev/null
+++ b/vendor/github.com/RackSec/srslog/formatter.go
@@ -0,0 +1,48 @@
+package srslog
+
+import (
+	"fmt"
+	"os"
+	"time"
+)
+
+// Formatter is a type of function that takes the constituent parts of a
+// syslog message and returns a formatted string. A different Formatter is
+// defined for each different syslog protocol we support.
+type Formatter func(p Priority, hostname, tag, content string) string
+
+// DefaultFormatter is the original format supported by the Go syslog package,
+// and is a non-compliant amalgamation of 3164 and 5424 that is intended to
+// maximize compatibility.
+func DefaultFormatter(p Priority, hostname, tag, content string) string {
+	timestamp := time.Now().Format(time.RFC3339)
+	msg := fmt.Sprintf("<%d> %s %s %s[%d]: %s",
+		p, timestamp, hostname, tag, os.Getpid(), content)
+	return msg
+}
+
+// UnixFormatter omits the hostname, because it is only used locally.
+func UnixFormatter(p Priority, hostname, tag, content string) string {
+	timestamp := time.Now().Format(time.Stamp)
+	msg := fmt.Sprintf("<%d>%s %s[%d]: %s",
+		p, timestamp, tag, os.Getpid(), content)
+	return msg
+}
+
+// RFC3164Formatter provides an RFC 3164 compliant message.
+func RFC3164Formatter(p Priority, hostname, tag, content string) string {
+	timestamp := time.Now().Format(time.Stamp)
+	msg := fmt.Sprintf("<%d> %s %s %s[%d]: %s",
+		p, timestamp, hostname, tag, os.Getpid(), content)
+	return msg
+}
+
+// RFC5424Formatter provides an RFC 5424 compliant message.
+func RFC5424Formatter(p Priority, hostname, tag, content string) string {
+	timestamp := time.Now().Format(time.RFC3339)
+	pid := os.Getpid()
+	appName := os.Args[0]
+	msg := fmt.Sprintf("<%d>%d %s %s %s %d %s %s",
+		p, 1, timestamp, hostname, appName, pid, tag, content)
+	return msg
+}
diff --git a/vendor/github.com/RackSec/srslog/framer.go b/vendor/github.com/RackSec/srslog/framer.go
new file mode 100644
index 00000000..ab46f0de
--- /dev/null
+++ b/vendor/github.com/RackSec/srslog/framer.go
@@ -0,0 +1,24 @@
+package srslog
+
+import (
+	"fmt"
+)
+
+// Framer is a type of function that takes an input string (typically an
+// already-formatted syslog message) and applies "message framing" to it. We
+// have different framers because different versions of the syslog protocol
+// and its transport requirements define different framing behavior.
+type Framer func(in string) string
+
+// DefaultFramer does nothing, since there is no framing to apply. This is
+// the original behavior of the Go syslog package, and is also typically used
+// for UDP syslog.
+func DefaultFramer(in string) string {
+	return in
+}
+
+// RFC5425MessageLengthFramer prepends the message length to the front of the
+// provided message, as defined in RFC 5425.
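+// For example, a 42-byte message m is framed as "42 " followed by m.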
+func RFC5425MessageLengthFramer(in string) string {
+	return fmt.Sprintf("%d %s", len(in), in)
+}
diff --git a/vendor/github.com/RackSec/srslog/net_conn.go b/vendor/github.com/RackSec/srslog/net_conn.go
new file mode 100644
index 00000000..75e4c3ca
--- /dev/null
+++ b/vendor/github.com/RackSec/srslog/net_conn.go
@@ -0,0 +1,30 @@
+package srslog
+
+import (
+	"net"
+)
+
+// netConn has an internal net.Conn and adheres to the serverConn interface,
+// allowing us to send syslog messages over the network.
+type netConn struct {
+	conn net.Conn
+}
+
+// writeString formats syslog messages using time.RFC3339, includes the
+// hostname, and sends the message to the connection.
+func (n *netConn) writeString(framer Framer, formatter Formatter, p Priority, hostname, tag, msg string) error {
+	if framer == nil {
+		framer = DefaultFramer
+	}
+	if formatter == nil {
+		formatter = DefaultFormatter
+	}
+	formattedMessage := framer(formatter(p, hostname, tag, msg))
+	_, err := n.conn.Write([]byte(formattedMessage))
+	return err
+}
+
+// close the network connection
+func (n *netConn) close() error {
+	return n.conn.Close()
+}
diff --git a/vendor/github.com/RackSec/srslog/srslog.go b/vendor/github.com/RackSec/srslog/srslog.go
new file mode 100644
index 00000000..4469d720
--- /dev/null
+++ b/vendor/github.com/RackSec/srslog/srslog.go
@@ -0,0 +1,100 @@
+package srslog
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"io/ioutil"
+	"log"
+	"os"
+)
+
+// This interface allows us to work with both local and network connections,
+// and enables Solaris support (see syslog_unix.go).
+type serverConn interface {
+	writeString(framer Framer, formatter Formatter, p Priority, hostname, tag, s string) error
+	close() error
+}
+
+// New establishes a new connection to the system log daemon. Each
+// write to the returned Writer sends a log message with the given
+// priority and prefix.
+func New(priority Priority, tag string) (w *Writer, err error) {
+	return Dial("", "", priority, tag)
+}
+
+// Dial establishes a connection to a log daemon by connecting to
+// address raddr on the specified network. Each write to the returned
+// Writer sends a log message with the given facility, severity and
+// tag.
+// If network is empty, Dial will connect to the local syslog server.
+func Dial(network, raddr string, priority Priority, tag string) (*Writer, error) {
+	return DialWithTLSConfig(network, raddr, priority, tag, nil)
+}
+
+// DialWithTLSCertPath establishes a secure connection to a log daemon by connecting to
+// address raddr on the specified network. It uses certPath to load TLS certificates and configure
+// the secure connection.
+func DialWithTLSCertPath(network, raddr string, priority Priority, tag, certPath string) (*Writer, error) {
+	serverCert, err := ioutil.ReadFile(certPath)
+	if err != nil {
+		return nil, err
+	}
+
+	return DialWithTLSCert(network, raddr, priority, tag, serverCert)
+}
+
+// DialWithTLSCert establishes a secure connection to a log daemon by connecting to
+// address raddr on the specified network. It uses serverCert to load a TLS certificate
+// and configure the secure connection.
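+// The certificate is appended to an x509 pool that serves as the RootCAs of
+// the TLS configuration; no client certificate is presented.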
+func DialWithTLSCert(network, raddr string, priority Priority, tag string, serverCert []byte) (*Writer, error) {
+	pool := x509.NewCertPool()
+	pool.AppendCertsFromPEM(serverCert)
+	config := tls.Config{
+		RootCAs: pool,
+	}
+
+	return DialWithTLSConfig(network, raddr, priority, tag, &config)
+}
+
+// DialWithTLSConfig establishes a secure connection to a log daemon by connecting to
+// address raddr on the specified network. It uses tlsConfig to configure the secure connection.
+func DialWithTLSConfig(network, raddr string, priority Priority, tag string, tlsConfig *tls.Config) (*Writer, error) {
+	if err := validatePriority(priority); err != nil {
+		return nil, err
+	}
+
+	if tag == "" {
+		tag = os.Args[0]
+	}
+	hostname, _ := os.Hostname()
+
+	w := &Writer{
+		priority:  priority,
+		tag:       tag,
+		hostname:  hostname,
+		network:   network,
+		raddr:     raddr,
+		tlsConfig: tlsConfig,
+	}
+
+	w.Lock()
+	defer w.Unlock()
+
+	err := w.connect()
+	if err != nil {
+		return nil, err
+	}
+	return w, err
+}
+
+// NewLogger creates a log.Logger whose output is written to
+// the system log service with the specified priority. The logFlag
+// argument is the flag set passed through to log.New to create
+// the Logger.
+func NewLogger(p Priority, logFlag int) (*log.Logger, error) {
+	s, err := New(p, "")
+	if err != nil {
+		return nil, err
+	}
+	return log.New(s, "", logFlag), nil
+}
diff --git a/vendor/github.com/RackSec/srslog/srslog_unix.go b/vendor/github.com/RackSec/srslog/srslog_unix.go
new file mode 100644
index 00000000..a04d9396
--- /dev/null
+++ b/vendor/github.com/RackSec/srslog/srslog_unix.go
@@ -0,0 +1,54 @@
+package srslog
+
+import (
+	"errors"
+	"io"
+	"net"
+)
+
+// unixSyslog opens a connection to the syslog daemon running on the
+// local machine using a Unix domain socket. This function exists because of
+// Solaris support as implemented by gccgo. On Solaris you cannot
+// simply open a TCP connection to the syslog daemon. The gccgo
+// sources have a syslog_solaris.go file that implements unixSyslog to
+// return a type that satisfies the serverConn interface and simply calls the C
+// library syslog function.
+func unixSyslog() (conn serverConn, err error) {
+	logTypes := []string{"unixgram", "unix"}
+	logPaths := []string{"/dev/log", "/var/run/syslog", "/var/run/log"}
+	for _, network := range logTypes {
+		for _, path := range logPaths {
+			conn, err := net.Dial(network, path)
+			if err != nil {
+				continue
+			} else {
+				return &localConn{conn: conn}, nil
+			}
+		}
+	}
+	return nil, errors.New("Unix syslog delivery error")
+}
+
+// localConn adheres to the serverConn interface, allowing us to send syslog
+// messages to the local syslog daemon over a Unix domain socket.
+type localConn struct {
+	conn io.WriteCloser
+}
+
+// writeString formats syslog messages using time.Stamp instead of time.RFC3339,
+// and omits the hostname (because it is expected to be used locally).
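+// A Formatter supplied by the caller still takes precedence; UnixFormatter is
+// only the fallback when the formatter argument is nil.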
+func (n *localConn) writeString(framer Framer, formatter Formatter, p Priority, hostname, tag, msg string) error {
+	if framer == nil {
+		framer = DefaultFramer
+	}
+	if formatter == nil {
+		formatter = UnixFormatter
+	}
+	_, err := n.conn.Write([]byte(framer(formatter(p, hostname, tag, msg))))
+	return err
+}
+
+// close the (local) network connection
+func (n *localConn) close() error {
+	return n.conn.Close()
+}
diff --git a/vendor/github.com/RackSec/srslog/writer.go b/vendor/github.com/RackSec/srslog/writer.go
new file mode 100644
index 00000000..fdecaf61
--- /dev/null
+++ b/vendor/github.com/RackSec/srslog/writer.go
@@ -0,0 +1,164 @@
+package srslog
+
+import (
+	"crypto/tls"
+	"strings"
+	"sync"
+)
+
+// A Writer is a connection to a syslog server.
+type Writer struct {
+	sync.Mutex // guards conn
+
+	priority  Priority
+	tag       string
+	hostname  string
+	network   string
+	raddr     string
+	tlsConfig *tls.Config
+	framer    Framer
+	formatter Formatter
+
+	conn serverConn
+}
+
+// connect makes a connection to the syslog server.
+// It must be called with the Writer's mutex held.
+func (w *Writer) connect() (err error) {
+	if w.conn != nil {
+		// ignore err from close, it makes sense to continue anyway
+		w.conn.close()
+		w.conn = nil
+	}
+
+	var conn serverConn
+	var hostname string
+	dialer := w.getDialer()
+	conn, hostname, err = dialer.Call()
+	if err == nil {
+		w.conn = conn
+		w.hostname = hostname
+	}
+
+	return
+}
+
+// SetFormatter changes the formatter function for subsequent messages.
+func (w *Writer) SetFormatter(f Formatter) {
+	w.formatter = f
+}
+
+// SetFramer changes the framer function for subsequent messages.
+func (w *Writer) SetFramer(f Framer) {
+	w.framer = f
+}
+
+// Write sends a log message to the syslog daemon using the default priority
+// passed into `srslog.New` or the `srslog.Dial*` functions.
+func (w *Writer) Write(b []byte) (int, error) {
+	return w.writeAndRetry(w.priority, string(b))
+}
+
+// Close closes a connection to the syslog daemon.
+func (w *Writer) Close() error {
+	w.Lock()
+	defer w.Unlock()
+
+	if w.conn != nil {
+		err := w.conn.close()
+		w.conn = nil
+		return err
+	}
+	return nil
+}
+
+// Emerg logs a message with severity LOG_EMERG; this overrides the default
+// priority passed to `srslog.New` and the `srslog.Dial*` functions.
+func (w *Writer) Emerg(m string) (err error) {
+	_, err = w.writeAndRetry(LOG_EMERG, m)
+	return err
+}
+
+// Alert logs a message with severity LOG_ALERT; this overrides the default
+// priority passed to `srslog.New` and the `srslog.Dial*` functions.
+func (w *Writer) Alert(m string) (err error) {
+	_, err = w.writeAndRetry(LOG_ALERT, m)
+	return err
+}
+
+// Crit logs a message with severity LOG_CRIT; this overrides the default
+// priority passed to `srslog.New` and the `srslog.Dial*` functions.
+func (w *Writer) Crit(m string) (err error) {
+	_, err = w.writeAndRetry(LOG_CRIT, m)
+	return err
+}
+
+// Err logs a message with severity LOG_ERR; this overrides the default
+// priority passed to `srslog.New` and the `srslog.Dial*` functions.
+func (w *Writer) Err(m string) (err error) {
+	_, err = w.writeAndRetry(LOG_ERR, m)
+	return err
+}
+
+// Warning logs a message with severity LOG_WARNING; this overrides the default
+// priority passed to `srslog.New` and the `srslog.Dial*` functions.
+func (w *Writer) Warning(m string) (err error) {
+	_, err = w.writeAndRetry(LOG_WARNING, m)
+	return err
+}
+
+// Notice logs a message with severity LOG_NOTICE; this overrides the default
+// priority passed to `srslog.New` and the `srslog.Dial*` functions.
+func (w *Writer) Notice(m string) (err error) {
+	_, err = w.writeAndRetry(LOG_NOTICE, m)
+	return err
+}
+
+// Info logs a message with severity LOG_INFO; this overrides the default
+// priority passed to `srslog.New` and the `srslog.Dial*` functions.
+func (w *Writer) Info(m string) (err error) {
+	_, err = w.writeAndRetry(LOG_INFO, m)
+	return err
+}
+
+// Debug logs a message with severity LOG_DEBUG; this overrides the default
+// priority passed to `srslog.New` and the `srslog.Dial*` functions.
+func (w *Writer) Debug(m string) (err error) {
+	_, err = w.writeAndRetry(LOG_DEBUG, m)
+	return err
+}
+
+func (w *Writer) writeAndRetry(p Priority, s string) (int, error) {
+	pr := (w.priority & facilityMask) | (p & severityMask)
+
+	w.Lock()
+	defer w.Unlock()
+
+	if w.conn != nil {
+		if n, err := w.write(pr, s); err == nil {
+			return n, err
+		}
+	}
+	if err := w.connect(); err != nil {
+		return 0, err
+	}
+	return w.write(pr, s)
+}
+
+// write generates and writes a syslog formatted string. It formats the
+// message based on the current Formatter and Framer.
+func (w *Writer) write(p Priority, msg string) (int, error) {
+	// ensure it ends in a \n
+	if !strings.HasSuffix(msg, "\n") {
+		msg += "\n"
+	}
+
+	err := w.conn.writeString(w.framer, w.formatter, p, w.hostname, w.tag, msg)
+	if err != nil {
+		return 0, err
+	}
+	// Note: return the length of the input, not the number of
+	// bytes printed by Fprintf, because this must behave like
+	// an io.Writer.
+	return len(msg), nil
+}
diff --git a/vendor/github.com/boltdb/bolt/.gitignore b/vendor/github.com/boltdb/bolt/.gitignore
new file mode 100644
index 00000000..c7bd2b7a
--- /dev/null
+++ b/vendor/github.com/boltdb/bolt/.gitignore
@@ -0,0 +1,4 @@
+*.prof
+*.test
+*.swp
+/bin/
diff --git a/vendor/github.com/boltdb/bolt/LICENSE b/vendor/github.com/boltdb/bolt/LICENSE
new file mode 100644
index 00000000..004e77fe
--- /dev/null
+++ b/vendor/github.com/boltdb/bolt/LICENSE
@@ -0,0 +1,20 @@
+The MIT License (MIT)
+
+Copyright (c) 2013 Ben Johnson
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/boltdb/bolt/Makefile b/vendor/github.com/boltdb/bolt/Makefile
new file mode 100644
index 00000000..e035e63a
--- /dev/null
+++ b/vendor/github.com/boltdb/bolt/Makefile
@@ -0,0 +1,18 @@
+BRANCH=`git rev-parse --abbrev-ref HEAD`
+COMMIT=`git rev-parse --short HEAD`
+GOLDFLAGS="-X main.branch $(BRANCH) -X main.commit $(COMMIT)"
+
+default: build
+
+race:
+	@go test -v -race -test.run="TestSimulate_(100op|1000op)"
+
+# go get github.com/kisielk/errcheck
+errcheck:
+	@errcheck -ignorepkg=bytes -ignore=os:Remove github.com/boltdb/bolt
+
+test:
+	@go test -v -cover .
+	@go test -v ./cmd/bolt
+
+.PHONY: fmt test
diff --git a/vendor/github.com/boltdb/bolt/README.md b/vendor/github.com/boltdb/bolt/README.md
new file mode 100644
index 00000000..66b19ace
--- /dev/null
+++ b/vendor/github.com/boltdb/bolt/README.md
@@ -0,0 +1,844 @@
+Bolt [![Build Status](https://drone.io/github.com/boltdb/bolt/status.png)](https://drone.io/github.com/boltdb/bolt/latest) [![Coverage Status](https://coveralls.io/repos/boltdb/bolt/badge.svg?branch=master)](https://coveralls.io/r/boltdb/bolt?branch=master) [![GoDoc](https://godoc.org/github.com/boltdb/bolt?status.svg)](https://godoc.org/github.com/boltdb/bolt) ![Version](https://img.shields.io/badge/version-1.0-green.svg)
+====
+
+Bolt is a pure Go key/value store inspired by [Howard Chu's][hyc_symas]
+[LMDB project][lmdb]. The goal of the project is to provide a simple,
+fast, and reliable database for projects that don't require a full database
+server such as Postgres or MySQL.
+
+Since Bolt is meant to be used as such a low-level piece of functionality,
+simplicity is key. The API will be small and only focus on getting values
+and setting values. That's it.
+
+[hyc_symas]: https://twitter.com/hyc_symas
+[lmdb]: http://symas.com/mdb/
+
+## Project Status
+
+Bolt is stable and the API is fixed. Full unit test coverage and randomized
+black box testing are used to ensure database consistency and thread safety.
+Bolt is currently in high-load production environments serving databases as
+large as 1TB. Many companies such as Shopify and Heroku use Bolt-backed
+services every day.
+
+## Table of Contents
+
+- [Getting Started](#getting-started)
+  - [Installing](#installing)
+  - [Opening a database](#opening-a-database)
+  - [Transactions](#transactions)
+    - [Read-write transactions](#read-write-transactions)
+    - [Read-only transactions](#read-only-transactions)
+    - [Batch read-write transactions](#batch-read-write-transactions)
+    - [Managing transactions manually](#managing-transactions-manually)
+  - [Using buckets](#using-buckets)
+  - [Using key/value pairs](#using-keyvalue-pairs)
+  - [Autoincrementing integer for the bucket](#autoincrementing-integer-for-the-bucket)
+  - [Iterating over keys](#iterating-over-keys)
+    - [Prefix scans](#prefix-scans)
+    - [Range scans](#range-scans)
+    - [ForEach()](#foreach)
+  - [Nested buckets](#nested-buckets)
+  - [Database backups](#database-backups)
+  - [Statistics](#statistics)
+  - [Read-Only Mode](#read-only-mode)
+  - [Mobile Use (iOS/Android)](#mobile-use-iosandroid)
+- [Resources](#resources)
+- [Comparison with other databases](#comparison-with-other-databases)
+  - [Postgres, MySQL, & other relational databases](#postgres-mysql--other-relational-databases)
+  - [LevelDB, RocksDB](#leveldb-rocksdb)
+  - [LMDB](#lmdb)
+- [Caveats & Limitations](#caveats--limitations)
+- [Reading the Source](#reading-the-source)
+- [Other Projects Using Bolt](#other-projects-using-bolt)
+
+## Getting Started
+
+### Installing
+
+To start using Bolt, install Go and run `go get`:
+
+```sh
+$ go get github.com/boltdb/bolt/...
+```
+
+This will retrieve the library and install the `bolt` command line utility into
+your `$GOBIN` path.
+
+
+### Opening a database
+
+The top-level object in Bolt is a `DB`. It is represented as a single file on
+your disk and represents a consistent snapshot of your data.
+
+To open your database, simply use the `bolt.Open()` function:
+
+```go
+package main
+
+import (
+	"log"
+
+	"github.com/boltdb/bolt"
+)
+
+func main() {
+	// Open the my.db data file in your current directory.
+	// It will be created if it doesn't exist.
+	db, err := bolt.Open("my.db", 0600, nil)
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer db.Close()
+
+	...
+}
+```
+
+Please note that Bolt obtains a file lock on the data file so multiple processes
+cannot open the same database at the same time. Opening an already open Bolt
+database will cause it to hang until the other process closes it. To prevent
+an indefinite wait you can pass a timeout option to the `Open()` function:
+
+```go
+db, err := bolt.Open("my.db", 0600, &bolt.Options{Timeout: 1 * time.Second})
+```
+
+
+### Transactions
+
+Bolt allows only one read-write transaction at a time but allows as many
+read-only transactions as you want at a time. Each transaction has a consistent
+view of the data as it existed when the transaction started.
+
+Individual transactions and all objects created from them (e.g. buckets, keys)
+are not thread safe. To work with data in multiple goroutines you must start
+a transaction for each one or use locking to ensure only one goroutine accesses
+a transaction at a time. Creating a transaction from the `DB` is thread safe.
+
+Read-only transactions and read-write transactions should not depend on one
+another and generally shouldn't be opened simultaneously in the same goroutine.
+This can cause a deadlock as the read-write transaction needs to periodically
+re-map the data file but it cannot do so while a read-only transaction is open.
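+
+As a minimal sketch of the pattern to avoid (the nesting here is illustrative,
+not taken from the Bolt documentation):
+
+```go
+// Don't do this: Update may need to re-map the data file, which cannot
+// happen while this goroutine's View transaction is still open.
+err := db.View(func(tx *bolt.Tx) error {
+	return db.Update(func(tx2 *bolt.Tx) error {
+		_, err := tx2.CreateBucketIfNotExists([]byte("MyBucket"))
+		return err
+	})
+})
+```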
+
+
+#### Read-write transactions
+
+To start a read-write transaction, you can use the `DB.Update()` function:
+
+```go
+err := db.Update(func(tx *bolt.Tx) error {
+	...
+	return nil
+})
+```
+
+Inside the closure, you have a consistent view of the database. You commit the
+transaction by returning `nil` at the end. You can also roll back the transaction
+at any point by returning an error. All database operations are allowed inside
+a read-write transaction.
+
+Always check the return error as it will report any disk failures that can cause
+your transaction to not complete. If you return an error within your closure
+it will be passed through.
+
+
+#### Read-only transactions
+
+To start a read-only transaction, you can use the `DB.View()` function:
+
+```go
+err := db.View(func(tx *bolt.Tx) error {
+	...
+	return nil
+})
+```
+
+You also get a consistent view of the database within this closure; however,
+no mutating operations are allowed within a read-only transaction. You can only
+retrieve buckets, retrieve values, and copy the database within a read-only
+transaction.
+
+
+#### Batch read-write transactions
+
+Each `DB.Update()` waits for the disk to commit the writes. This overhead
+can be minimized by combining multiple updates with the `DB.Batch()`
+function:
+
+```go
+err := db.Batch(func(tx *bolt.Tx) error {
+	...
+	return nil
+})
+```
+
+Concurrent Batch calls are opportunistically combined into larger
+transactions. Batch is only useful when there are multiple goroutines
+calling it.
+
+The trade-off is that `Batch` can call the given
+function multiple times if parts of the transaction fail. The
+function must be idempotent and side effects must take effect only
+after a successful return from `DB.Batch()`.
+
+For example: don't display messages from inside the function; instead,
+set variables in the enclosing scope:
+
+```go
+var id uint64
+err := db.Batch(func(tx *bolt.Tx) error {
+	// Find last key in bucket, decode as bigendian uint64, increment
+	// by one, encode back to []byte, and add new key.
+	...
+	id = newValue
+	return nil
+})
+if err != nil {
+	return ...
+}
+fmt.Printf("Allocated ID %d\n", id)
+```
+
+
+#### Managing transactions manually
+
+The `DB.View()` and `DB.Update()` functions are wrappers around the `DB.Begin()`
+function. These helper functions will start the transaction, execute a function,
+and then safely close your transaction if an error is returned. This is the
+recommended way to use Bolt transactions.
+
+However, sometimes you may want to manually start and end your transactions.
+You can use the `DB.Begin()` function directly but **please** be sure to close
+the transaction.
+
+```go
+// Start a writable transaction.
+tx, err := db.Begin(true)
+if err != nil {
+	return err
+}
+defer tx.Rollback()
+
+// Use the transaction...
+_, err = tx.CreateBucket([]byte("MyBucket"))
+if err != nil {
+	return err
+}
+
+// Commit the transaction and check for error.
+if err := tx.Commit(); err != nil {
+	return err
+}
+```
+
+The first argument to `DB.Begin()` is a boolean stating if the transaction
+should be writable.
+
+
+### Using buckets
+
+Buckets are collections of key/value pairs within the database. All keys in a
+bucket must be unique. You can create a bucket using the `DB.CreateBucket()`
+function:
+
+```go
+db.Update(func(tx *bolt.Tx) error {
+	_, err := tx.CreateBucket([]byte("MyBucket"))
+	if err != nil {
+		return fmt.Errorf("create bucket: %s", err)
+	}
+	return nil
+})
+```
+
+You can also create a bucket only if it doesn't exist by using the
+`Tx.CreateBucketIfNotExists()` function. It's a common pattern to call this
+function for all your top-level buckets after you open your database so you can
+guarantee that they exist for future transactions.
+
+To delete a bucket, simply call the `Tx.DeleteBucket()` function.
+
+
+### Using key/value pairs
+
+To save a key/value pair to a bucket, use the `Bucket.Put()` function:
+
+```go
+db.Update(func(tx *bolt.Tx) error {
+	b := tx.Bucket([]byte("MyBucket"))
+	err := b.Put([]byte("answer"), []byte("42"))
+	return err
+})
+```
+
+This will set the value of the `"answer"` key to `"42"` in the `MyBucket`
+bucket. To retrieve this value, we can use the `Bucket.Get()` function:
+
+```go
+db.View(func(tx *bolt.Tx) error {
+	b := tx.Bucket([]byte("MyBucket"))
+	v := b.Get([]byte("answer"))
+	fmt.Printf("The answer is: %s\n", v)
+	return nil
+})
+```
+
+The `Get()` function does not return an error because its operation is
+guaranteed to work (unless there is some kind of system failure). If the key
+exists then it will return its byte slice value. If it doesn't exist then it
+will return `nil`. It's important to note that you can have a zero-length value
+set for a key, which is different from the key not existing.
+
+Use the `Bucket.Delete()` function to delete a key from the bucket.
+
+Please note that values returned from `Get()` are only valid while the
+transaction is open. If you need to use a value outside of the transaction
+then you must use `copy()` to copy it to another byte slice.
+
+
+### Autoincrementing integer for the bucket
+By using the `NextSequence()` function, you can let Bolt determine a sequence
+which can be used as the unique identifier for your key/value pairs. See the
+example below.
+
+```go
+// CreateUser saves u to the store. The new user ID is set on u once the data is persisted.
+func (s *Store) CreateUser(u *User) error {
+	return s.db.Update(func(tx *bolt.Tx) error {
+		// Retrieve the users bucket.
+		// This should be created when the DB is first opened.
+		b := tx.Bucket([]byte("users"))
+
+		// Generate ID for the user.
+		// This returns an error only if the Tx is closed or not writeable.
+		// That can't happen in an Update() call so I ignore the error check.
+		id, _ := b.NextSequence()
+		u.ID = int(id)
+
+		// Marshal user data into bytes.
+		buf, err := json.Marshal(u)
+		if err != nil {
+			return err
+		}
+
+		// Persist bytes to users bucket.
+		return b.Put(itob(u.ID), buf)
+	})
+}
+
+// itob returns an 8-byte big endian representation of v.
+func itob(v int) []byte {
+	b := make([]byte, 8)
+	binary.BigEndian.PutUint64(b, uint64(v))
+	return b
+}
+
+type User struct {
+	ID int
+	...
+}
+```
+
+### Iterating over keys
+
+Bolt stores its keys in byte-sorted order within a bucket. This makes sequential
+iteration over these keys extremely fast. To iterate over keys we'll use a
+`Cursor`:
+
+```go
+db.View(func(tx *bolt.Tx) error {
+	// Assume bucket exists and has keys
+	b := tx.Bucket([]byte("MyBucket"))
+
+	c := b.Cursor()
+
+	for k, v := c.First(); k != nil; k, v = c.Next() {
+		fmt.Printf("key=%s, value=%s\n", k, v)
+	}
+
+	return nil
+})
+```
+
+The cursor allows you to move to a specific point in the list of keys and move
+forward or backward through the keys one at a time.
+
+The following functions are available on the cursor:
+
+```
+First()  Move to the first key.
+Last()   Move to the last key.
+Seek()   Move to a specific key.
+Next()   Move to the next key.
+Prev()   Move to the previous key.
+```
+
+Each of those functions has a return signature of `(key []byte, value []byte)`.
+When you have iterated to the end of the cursor, `Next()` will return a
+`nil` key. You must seek to a position using `First()`, `Last()`, or `Seek()`
+before calling `Next()` or `Prev()`. If you do not seek to a position then
+these functions will return a `nil` key.
+
+During iteration, if the key is non-`nil` but the value is `nil`, that means
+the key refers to a bucket rather than a value. Use `Bucket.Bucket()` to
+access the sub-bucket.
+
+
+#### Prefix scans
+
+To iterate over a key prefix, you can combine `Seek()` and `bytes.HasPrefix()`:
+
+```go
+db.View(func(tx *bolt.Tx) error {
+	// Assume bucket exists and has keys
+	c := tx.Bucket([]byte("MyBucket")).Cursor()
+
+	prefix := []byte("1234")
+	for k, v := c.Seek(prefix); bytes.HasPrefix(k, prefix); k, v = c.Next() {
+		fmt.Printf("key=%s, value=%s\n", k, v)
+	}
+
+	return nil
+})
+```
+
+#### Range scans
+
+Another common use case is scanning over a range such as a time range. If you
+use a sortable time encoding such as RFC3339 then you can query a specific
+date range like this:
+
+```go
+db.View(func(tx *bolt.Tx) error {
+	// Assume our events bucket exists and has RFC3339 encoded time keys.
+	c := tx.Bucket([]byte("Events")).Cursor()
+
+	// Our time range spans the 90's decade.
+	min := []byte("1990-01-01T00:00:00Z")
+	max := []byte("2000-01-01T00:00:00Z")
+
+	// Iterate over the 90's.
+	for k, v := c.Seek(min); k != nil && bytes.Compare(k, max) <= 0; k, v = c.Next() {
+		fmt.Printf("%s: %s\n", k, v)
+	}
+
+	return nil
+})
+```
+
+
+#### ForEach()
+
+You can also use the function `ForEach()` if you know you'll be iterating over
+all the keys in a bucket:
+
+```go
+db.View(func(tx *bolt.Tx) error {
+	// Assume bucket exists and has keys
+	b := tx.Bucket([]byte("MyBucket"))
+
+	b.ForEach(func(k, v []byte) error {
+		fmt.Printf("key=%s, value=%s\n", k, v)
+		return nil
+	})
+	return nil
+})
+```
+
+
+### Nested buckets
+
+You can also store a bucket in a key to create nested buckets. The API is the
+same as the bucket management API on the `DB` object:
+
+```go
+func (*Bucket) CreateBucket(key []byte) (*Bucket, error)
+func (*Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error)
+func (*Bucket) DeleteBucket(key []byte) error
+```
+
+
+### Database backups
+
+Bolt is a single file so it's easy to backup. You can use the `Tx.WriteTo()`
+function to write a consistent view of the database to a writer. If you call
+this from a read-only transaction, it will perform a hot backup and not block
+your other database reads and writes.
+
+By default, it will use a regular file handle which will utilize the operating
+system's page cache. See the [`Tx`](https://godoc.org/github.com/boltdb/bolt#Tx)
+documentation for information about optimizing for larger-than-RAM datasets.
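+
+As a minimal sketch, a snapshot to a local file with `Tx.WriteTo()` (the
+backup file name here is hypothetical) might look like this:
+
+```go
+err := db.View(func(tx *bolt.Tx) error {
+	f, err := os.Create("my.db.bak")
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+	_, err = tx.WriteTo(f)
+	return err
+})
+```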
+
+One common use case is to backup over HTTP so you can use tools like `cURL` to
+do database backups:
+
+```go
+func BackupHandleFunc(w http.ResponseWriter, req *http.Request) {
+	err := db.View(func(tx *bolt.Tx) error {
+		w.Header().Set("Content-Type", "application/octet-stream")
+		w.Header().Set("Content-Disposition", `attachment; filename="my.db"`)
+		w.Header().Set("Content-Length", strconv.Itoa(int(tx.Size())))
+		_, err := tx.WriteTo(w)
+		return err
+	})
+	if err != nil {
+		http.Error(w, err.Error(), http.StatusInternalServerError)
+	}
+}
+```
+
+Then you can backup using this command:
+
+```sh
+$ curl http://localhost/backup > my.db
+```
+
+Or you can open your browser to `http://localhost/backup` and it will download
+automatically.
+
+If you want to backup to another file you can use the `Tx.CopyFile()` helper
+function.
+
+
+### Statistics
+
+The database keeps a running count of many of the internal operations it
+performs so you can better understand what's going on. By grabbing a snapshot
+of these stats at two points in time we can see what operations were performed
+in that time range.
+
+For example, we could start a goroutine to log stats every 10 seconds:
+
+```go
+go func() {
+	// Grab the initial stats.
+	prev := db.Stats()
+
+	for {
+		// Wait for 10s.
+		time.Sleep(10 * time.Second)
+
+		// Grab the current stats and diff them.
+		stats := db.Stats()
+		diff := stats.Sub(&prev)
+
+		// Encode stats to JSON and print to STDERR.
+		json.NewEncoder(os.Stderr).Encode(diff)
+
+		// Save stats for the next loop.
+		prev = stats
+	}
+}()
+```
+
+It's also useful to pipe these stats to a service such as statsd for monitoring
+or to provide an HTTP endpoint that will perform a fixed-length sample.
+
+
+### Read-Only Mode
+
+Sometimes it is useful to create a shared, read-only Bolt database. To do this,
+set the `Options.ReadOnly` flag when opening your database. Read-only mode
+uses a shared lock to allow multiple processes to read from the database but
+it will block any processes from opening the database in read-write mode.
+
+```go
+db, err := bolt.Open("my.db", 0666, &bolt.Options{ReadOnly: true})
+if err != nil {
+	log.Fatal(err)
+}
+```
+
+### Mobile Use (iOS/Android)
+
+Bolt is able to run on mobile devices by leveraging the binding feature of the
+[gomobile](https://github.com/golang/mobile) tool. Create a struct that will
+contain your database logic and a reference to a `*bolt.DB` with an initializing
+constructor that takes in a filepath where the database file will be stored.
+Neither Android nor iOS require extra permissions or cleanup from using this method.
+
+```go
+func NewBoltDB(filepath string) *BoltDB {
+	db, err := bolt.Open(filepath+"/demo.db", 0600, nil)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	return &BoltDB{db}
+}
+
+type BoltDB struct {
+	db *bolt.DB
+	...
+}
+
+func (b *BoltDB) Path() string {
+	return b.db.Path()
+}
+
+func (b *BoltDB) Close() {
+	b.db.Close()
+}
+```
+
+Database logic should be defined as methods on this wrapper struct.
+
+To initialize this struct from the native language (both platforms now sync
+their local storage to the cloud; these snippets disable that functionality for the
+database file):
+
+#### Android
+
+```java
+String path;
+if (android.os.Build.VERSION.SDK_INT >= android.os.Build.VERSION_CODES.LOLLIPOP) {
+    path = getNoBackupFilesDir().getAbsolutePath();
+} else {
+    path = getFilesDir().getAbsolutePath();
+}
+Boltmobiledemo.BoltDB boltDB = Boltmobiledemo.NewBoltDB(path);
+```
+
+#### iOS
+
+```objc
+- (void)demo {
+    NSString* path = [NSSearchPathForDirectoriesInDomains(NSLibraryDirectory,
+                                                          NSUserDomainMask,
+                                                          YES) objectAtIndex:0];
+    GoBoltmobiledemoBoltDB * demo = GoBoltmobiledemoNewBoltDB(path);
+    [self addSkipBackupAttributeToItemAtPath:demo.path];
+    //Some DB Logic would go here
+    [demo close];
+}
+
+- (BOOL)addSkipBackupAttributeToItemAtPath:(NSString *) filePathString
+{
+    NSURL* URL= [NSURL fileURLWithPath: filePathString];
+    assert([[NSFileManager defaultManager] fileExistsAtPath: [URL path]]);
+
+    NSError *error = nil;
+    BOOL success = [URL setResourceValue: [NSNumber numberWithBool: YES]
+                                  forKey: NSURLIsExcludedFromBackupKey error: &error];
+    if(!success){
+        NSLog(@"Error excluding %@ from backup %@", [URL lastPathComponent], error);
+    }
+    return success;
+}
+```
+
+## Resources
+
+For more information on getting started with Bolt, check out the following articles:
+
+* [Intro to BoltDB: Painless Performant Persistence](http://npf.io/2014/07/intro-to-boltdb-painless-performant-persistence/) by [Nate Finch](https://github.com/natefinch).
+* [Bolt -- an embedded key/value database for Go](https://www.progville.com/go/bolt-embedded-db-golang/) by Progville
+
+
+## Comparison with other databases
+
+### Postgres, MySQL, & other relational databases
+
+Relational databases structure data into rows and are only accessible through
+the use of SQL. This approach provides flexibility in how you store and query
+your data but also incurs overhead in parsing and planning SQL statements. Bolt
+accesses all data by a byte slice key. This makes Bolt fast to read and write
+data by key but provides no built-in support for joining values together.
+
+Most relational databases (with the exception of SQLite) are standalone servers
+that run separately from your application. This gives your systems
+flexibility to connect multiple application servers to a single database
+server but also adds overhead in serializing and transporting data over the
+network. Bolt runs as a library included in your application so all data access
+has to go through your application's process. This brings data closer to your
+application but limits multi-process access to the data.
+
+
+### LevelDB, RocksDB
+
+LevelDB and its derivatives (RocksDB, HyperLevelDB) are similar to Bolt in that
+they are libraries bundled into the application; however, their underlying
+structure is a log-structured merge-tree (LSM tree). An LSM tree optimizes
+random writes by using a write-ahead log and multi-tiered, sorted files called
+SSTables. Bolt uses a B+tree internally and only a single file. Both approaches
+have trade-offs.
+
+If you require a high random write throughput (>10,000 w/sec) or you need to use
+spinning disks then LevelDB could be a good choice. If your application is
+read-heavy or does a lot of range scans then Bolt could be a good choice.
+
+One other important consideration is that LevelDB does not have transactions.
+It supports batch writing of key/value pairs and it supports read snapshots
+but it will not give you the ability to do a compare-and-swap operation safely.
+Bolt supports fully serializable ACID transactions.
+
+
+### LMDB
+
+Bolt was originally a port of LMDB so it is architecturally similar. Both use
+a B+tree, have ACID semantics with fully serializable transactions, and support
+lock-free MVCC using a single writer and multiple readers.
+
+The two projects have somewhat diverged. LMDB heavily focuses on raw performance
+while Bolt has focused on simplicity and ease of use. For example, LMDB allows
+several unsafe actions such as direct writes for the sake of performance. Bolt
+opts to disallow actions which can leave the database in a corrupted state. The
+only exception to this in Bolt is `DB.NoSync`.
+
+There are also a few differences in API. LMDB requires a maximum mmap size when
+opening an `mdb_env` whereas Bolt will handle incremental mmap resizing
+automatically. LMDB overloads the getter and setter functions with multiple
+flags whereas Bolt splits these specialized cases into their own functions.
+
+
+## Caveats & Limitations
+
+It's important to pick the right tool for the job and Bolt is no exception.
+Here are a few things to note when evaluating and using Bolt:
+
+* Bolt is good for read intensive workloads. Sequential write performance is
+  also fast but random writes can be slow. You can use `DB.Batch()` or add a
+  write-ahead log to help mitigate this issue.
+
+* Bolt uses a B+tree internally so there can be a lot of random page access.
+  SSDs provide a significant performance boost over spinning disks.
+
+* Try to avoid long running read transactions. Bolt uses copy-on-write so
+  old pages cannot be reclaimed while an old transaction is using them.
+
+* Byte slices returned from Bolt are only valid during a transaction. Once the
+  transaction has been committed or rolled back then the memory they point to
+  can be reused by a new page or can be unmapped from virtual memory and you'll
+  see an `unexpected fault address` panic when accessing it.
+
+* Be careful when using `Bucket.FillPercent`. Setting a high fill percent for
+  buckets that have random inserts will cause your database to have very poor
+  page utilization.
+
+* Use larger buckets in general. Smaller buckets cause poor page utilization
+  once they become larger than the page size (typically 4KB).
+
+* Bulk loading a lot of random writes into a new bucket can be slow as the
+  page will not split until the transaction is committed. Randomly inserting
+  more than 100,000 key/value pairs into a single new bucket in a single
+  transaction is not advised.
+
+* Bolt uses a memory-mapped file so the underlying operating system handles the
+  caching of the data. Typically, the OS will cache as much of the file as it
+  can in memory and will release memory as needed to other processes. This means
+  that Bolt can show very high memory usage when working with large databases.
+  However, this is expected and the OS will release memory as needed. Bolt can
+  handle databases much larger than the available physical RAM, provided its
+  memory map fits in the process's virtual address space. This may be problematic
+  on 32-bit systems.
+
+* The data structures in the Bolt database are memory mapped so the data file
+  will be endian specific. This means that you cannot copy a Bolt file from a
+  little endian machine to a big endian machine and have it work. For most
+  users this is not a concern since most modern CPUs are little endian.
+
+* Because of the way pages are laid out on disk, Bolt cannot truncate data files
+  and return free pages back to the disk. Instead, Bolt maintains a free list
+  of unused pages within its data file. These free pages can be reused by later
+  transactions. This works well for many use cases as databases generally tend
+  to grow. However, it's important to note that deleting large chunks of data
+  will not allow you to reclaim that space on disk.
+
+  For more information on page allocation, [see this comment][page-allocation].
+
+[page-allocation]: https://github.com/boltdb/bolt/issues/308#issuecomment-74811638
+
+
+## Reading the Source
+
+Bolt is a relatively small code base (<3KLOC) for an embedded, serializable,
+transactional key/value database so it can be a good starting point for people
+interested in how databases work.
+
+The best places to start are the main entry points into Bolt:
+
+- `Open()` - Initializes the reference to the database. It's responsible for
+  creating the database if it doesn't exist, obtaining an exclusive lock on the
+  file, reading the meta pages, & memory-mapping the file.
+
+- `DB.Begin()` - Starts a read-only or read-write transaction depending on the
+  value of the `writable` argument. This requires briefly obtaining the "meta"
+  lock to keep track of open transactions. Only one read-write transaction can
+  exist at a time so the "rwlock" is acquired during the life of a read-write
+  transaction.
+
+- `Bucket.Put()` - Writes a key/value pair into a bucket. After validating the
+  arguments, a cursor is used to traverse the B+tree to the page and position
+  where the key & value will be written. Once the position is found, the bucket
+  materializes the underlying page and the page's parent pages into memory as
+  "nodes". These nodes are where mutations occur during read-write transactions.
+  These changes get flushed to disk during commit.
+
+- `Bucket.Get()` - Retrieves a key/value pair from a bucket. This uses a cursor
+  to move to the page & position of a key/value pair. During a read-only
+  transaction, the key and value data is returned as a direct reference to the
+  underlying mmap file so there's no allocation overhead. For read-write
+  transactions, this data may reference the mmap file or one of the in-memory
+  node values.
+
+- `Cursor` - This object is simply for traversing the B+tree of on-disk pages
+  or in-memory nodes. It can seek to a specific key, move to the first or last
+  value, or it can move forward or backward. The cursor handles the movement up
+  and down the B+tree transparently to the end user.
+
+- `Tx.Commit()` - Converts the in-memory dirty nodes and the list of free pages
+  into pages to be written to disk. Writing to disk then occurs in two phases.
+  First, the dirty pages are written to disk and an `fsync()` occurs. Second, a
+  new meta page with an incremented transaction ID is written and another
+  `fsync()` occurs. This two phase write ensures that partially written data
+  pages are ignored in the event of a crash since the meta page pointing to them
+  is never written. Partially written meta pages are invalidated because they
+  are written with a checksum.
+
+If you have additional notes that could be helpful for others, please submit
+them via pull request.
+
+
+## Other Projects Using Bolt
+
+Below is a list of public, open source projects that use Bolt:
+
+* [Operation Go: A Routine Mission](http://gocode.io) - An online programming game for Golang using Bolt for user accounts and a leaderboard.
+* [Bazil](https://bazil.org/) - A file system that lets your data reside where
+  it is most convenient for it to reside.
+* [DVID](https://github.com/janelia-flyem/dvid) - Added Bolt as optional storage engine and testing it against Basho-tuned leveldb.
+* [Skybox Analytics](https://github.com/skybox/skybox) - A standalone funnel analysis tool for web analytics.
+* [Scuttlebutt](https://github.com/benbjohnson/scuttlebutt) - Uses Bolt to store and process all Twitter mentions of GitHub projects.
+* [Wiki](https://github.com/peterhellberg/wiki) - A tiny wiki using Goji, BoltDB and Blackfriday.
+* [ChainStore](https://github.com/pressly/chainstore) - Simple key-value interface to a variety of storage engines organized as a chain of operations.
+* [MetricBase](https://github.com/msiebuhr/MetricBase) - Single-binary version of Graphite.
+* [Gitchain](https://github.com/gitchain/gitchain) - Decentralized, peer-to-peer Git repositories aka "Git meets Bitcoin".
+* [event-shuttle](https://github.com/sclasen/event-shuttle) - A Unix system service to collect and reliably deliver messages to Kafka.
+* [ipxed](https://github.com/kelseyhightower/ipxed) - Web interface and api for ipxed.
+* [BoltStore](https://github.com/yosssi/boltstore) - Session store using Bolt.
+* [photosite/session](https://godoc.org/bitbucket.org/kardianos/photosite/session) - Sessions for a photo viewing site.
+* [LedisDB](https://github.com/siddontang/ledisdb) - A high performance NoSQL, using Bolt as optional storage.
+* [ipLocator](https://github.com/AndreasBriese/ipLocator) - A fast ip-geo-location-server using bolt with bloom filters.
+* [cayley](https://github.com/google/cayley) - Cayley is an open-source graph database using Bolt as optional backend.
+* [bleve](http://www.blevesearch.com/) - A pure Go search engine similar to ElasticSearch that uses Bolt as the default storage backend.
+* [tentacool](https://github.com/optiflows/tentacool) - REST api server to manage system stuff (IP, DNS, Gateway...) on a linux server.
+* [SkyDB](https://github.com/skydb/sky) - Behavioral analytics database.
+* [Seaweed File System](https://github.com/chrislusf/seaweedfs) - Highly scalable distributed key~file system with O(1) disk read.
+* [InfluxDB](https://influxdata.com) - Scalable datastore for metrics, events, and real-time analytics.
+* [Freehold](http://tshannon.bitbucket.org/freehold/) - An open, secure, and lightweight platform for your files and data.
+* [Prometheus Annotation Server](https://github.com/oliver006/prom_annotation_server) - Annotation server for PromDash & Prometheus service monitoring system.
+* [Consul](https://github.com/hashicorp/consul) - Consul is service discovery and configuration made easy. Distributed, highly available, and datacenter-aware.
+* [Kala](https://github.com/ajvb/kala) - Kala is a modern job scheduler optimized to run on a single node. It is persistent, JSON over HTTP API, ISO 8601 duration notation, and dependent jobs.
+* [drive](https://github.com/odeke-em/drive) - drive is an unofficial Google Drive command line client for \*NIX operating systems.
+* [stow](https://github.com/djherbis/stow) - a persistence manager for objects
+  backed by boltdb.
+* [buckets](https://github.com/joyrexus/buckets) - a bolt wrapper streamlining
+  simple tx and key scans.
+* [mbuckets](https://github.com/abhigupta912/mbuckets) - A Bolt wrapper that allows easy operations on multi level (nested) buckets.
+* [Request Baskets](https://github.com/darklynx/request-baskets) - A web service to collect arbitrary HTTP requests and inspect them via REST API or simple web UI, similar to [RequestBin](http://requestb.in/) service
+* [Go Report Card](https://goreportcard.com/) - Go code quality report cards as a (free and open source) service.
+* [Boltdb Boilerplate](https://github.com/bobintornado/boltdb-boilerplate) - Boilerplate wrapper around bolt aiming to make simple calls one-liners.
+* [lru](https://github.com/crowdriff/lru) - Easy to use Bolt-backed Least-Recently-Used (LRU) read-through cache with chainable remote stores.
+
+If you are using Bolt in a project please send a pull request to add it to the list.
diff --git a/vendor/github.com/boltdb/bolt/appveyor.yml b/vendor/github.com/boltdb/bolt/appveyor.yml
new file mode 100644
index 00000000..6e26e941
--- /dev/null
+++ b/vendor/github.com/boltdb/bolt/appveyor.yml
@@ -0,0 +1,18 @@
+version: "{build}"
+
+os: Windows Server 2012 R2
+
+clone_folder: c:\gopath\src\github.com\boltdb\bolt
+
+environment:
+  GOPATH: c:\gopath
+
+install:
+  - echo %PATH%
+  - echo %GOPATH%
+  - go version
+  - go env
+  - go get -v -t ./...
+
+build_script:
+  - go test -v ./...
diff --git a/vendor/github.com/boltdb/bolt/bolt_386.go b/vendor/github.com/boltdb/bolt/bolt_386.go
new file mode 100644
index 00000000..e659bfb9
--- /dev/null
+++ b/vendor/github.com/boltdb/bolt/bolt_386.go
@@ -0,0 +1,7 @@
+package bolt
+
+// maxMapSize represents the largest mmap size supported by Bolt.
+const maxMapSize = 0x7FFFFFFF // 2GB
+
+// maxAllocSize is the size used when creating array pointers.
+const maxAllocSize = 0xFFFFFFF
diff --git a/vendor/github.com/boltdb/bolt/bolt_amd64.go b/vendor/github.com/boltdb/bolt/bolt_amd64.go
new file mode 100644
index 00000000..cca6b7eb
--- /dev/null
+++ b/vendor/github.com/boltdb/bolt/bolt_amd64.go
@@ -0,0 +1,7 @@
+package bolt
+
+// maxMapSize represents the largest mmap size supported by Bolt.
+const maxMapSize = 0xFFFFFFFFFFFF // 256TB
+
+// maxAllocSize is the size used when creating array pointers.
+const maxAllocSize = 0x7FFFFFFF
diff --git a/vendor/github.com/boltdb/bolt/bolt_arm.go b/vendor/github.com/boltdb/bolt/bolt_arm.go
new file mode 100644
index 00000000..e659bfb9
--- /dev/null
+++ b/vendor/github.com/boltdb/bolt/bolt_arm.go
@@ -0,0 +1,7 @@
+package bolt
+
+// maxMapSize represents the largest mmap size supported by Bolt.
+const maxMapSize = 0x7FFFFFFF // 2GB
+
+// maxAllocSize is the size used when creating array pointers.
+const maxAllocSize = 0xFFFFFFF
diff --git a/vendor/github.com/boltdb/bolt/bolt_arm64.go b/vendor/github.com/boltdb/bolt/bolt_arm64.go
new file mode 100644
index 00000000..6d230935
--- /dev/null
+++ b/vendor/github.com/boltdb/bolt/bolt_arm64.go
@@ -0,0 +1,9 @@
+// +build arm64
+
+package bolt
+
+// maxMapSize represents the largest mmap size supported by Bolt.
+const maxMapSize = 0xFFFFFFFFFFFF // 256TB
+
+// maxAllocSize is the size used when creating array pointers.
+const maxAllocSize = 0x7FFFFFFF
diff --git a/vendor/github.com/boltdb/bolt/bolt_linux.go b/vendor/github.com/boltdb/bolt/bolt_linux.go
new file mode 100644
index 00000000..2b676661
--- /dev/null
+++ b/vendor/github.com/boltdb/bolt/bolt_linux.go
@@ -0,0 +1,10 @@
+package bolt
+
+import (
+	"syscall"
+)
+
+// fdatasync flushes written data to a file descriptor.
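+// On Linux it uses fdatasync(2), which, unlike fsync(2), skips flushing file
+// metadata such as timestamps unless that metadata is needed to read the data.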
+func fdatasync(db *DB) error {
+	return syscall.Fdatasync(int(db.file.Fd()))
+}
diff --git a/vendor/github.com/boltdb/bolt/bolt_openbsd.go b/vendor/github.com/boltdb/bolt/bolt_openbsd.go
new file mode 100644
index 00000000..7058c3d7
--- /dev/null
+++ b/vendor/github.com/boltdb/bolt/bolt_openbsd.go
@@ -0,0 +1,27 @@
+package bolt
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+const (
+	msAsync      = 1 << iota // perform asynchronous writes
+	msSync                   // perform synchronous writes
+	msInvalidate             // invalidate cached data
+)
+
+func msync(db *DB) error {
+	_, _, errno := syscall.Syscall(syscall.SYS_MSYNC, uintptr(unsafe.Pointer(db.data)), uintptr(db.datasz), msInvalidate)
+	if errno != 0 {
+		return errno
+	}
+	return nil
+}
+
+func fdatasync(db *DB) error {
+	if db.data != nil {
+		return msync(db)
+	}
+	return db.file.Sync()
+}
diff --git a/vendor/github.com/boltdb/bolt/bolt_ppc.go b/vendor/github.com/boltdb/bolt/bolt_ppc.go
new file mode 100644
index 00000000..645ddc3e
--- /dev/null
+++ b/vendor/github.com/boltdb/bolt/bolt_ppc.go
@@ -0,0 +1,9 @@
+// +build ppc
+
+package bolt
+
+// maxMapSize represents the largest mmap size supported by Bolt.
+const maxMapSize = 0x7FFFFFFF // 2GB
+
+// maxAllocSize is the size used when creating array pointers.
+const maxAllocSize = 0xFFFFFFF
diff --git a/vendor/github.com/boltdb/bolt/bolt_ppc64.go b/vendor/github.com/boltdb/bolt/bolt_ppc64.go
new file mode 100644
index 00000000..2dc6be02
--- /dev/null
+++ b/vendor/github.com/boltdb/bolt/bolt_ppc64.go
@@ -0,0 +1,9 @@
+// +build ppc64
+
+package bolt
+
+// maxMapSize represents the largest mmap size supported by Bolt.
+const maxMapSize = 0xFFFFFFFFFFFF // 256TB
+
+// maxAllocSize is the size used when creating array pointers.
+const maxAllocSize = 0x7FFFFFFF
diff --git a/vendor/github.com/boltdb/bolt/bolt_ppc64le.go b/vendor/github.com/boltdb/bolt/bolt_ppc64le.go
new file mode 100644
index 00000000..8351e129
--- /dev/null
+++ b/vendor/github.com/boltdb/bolt/bolt_ppc64le.go
@@ -0,0 +1,9 @@
+// +build ppc64le
+
+package bolt
+
+// maxMapSize represents the largest mmap size supported by Bolt.
+const maxMapSize = 0xFFFFFFFFFFFF // 256TB
+
+// maxAllocSize is the size used when creating array pointers.
+const maxAllocSize = 0x7FFFFFFF
diff --git a/vendor/github.com/boltdb/bolt/bolt_s390x.go b/vendor/github.com/boltdb/bolt/bolt_s390x.go
new file mode 100644
index 00000000..f4dd26bb
--- /dev/null
+++ b/vendor/github.com/boltdb/bolt/bolt_s390x.go
@@ -0,0 +1,9 @@
+// +build s390x
+
+package bolt
+
+// maxMapSize represents the largest mmap size supported by Bolt.
+const maxMapSize = 0xFFFFFFFFFFFF // 256TB
+
+// maxAllocSize is the size used when creating array pointers.
+const maxAllocSize = 0x7FFFFFFF
diff --git a/vendor/github.com/boltdb/bolt/bolt_unix.go b/vendor/github.com/boltdb/bolt/bolt_unix.go
new file mode 100644
index 00000000..cad62dda
--- /dev/null
+++ b/vendor/github.com/boltdb/bolt/bolt_unix.go
@@ -0,0 +1,89 @@
+// +build !windows,!plan9,!solaris
+
+package bolt
+
+import (
+	"fmt"
+	"os"
+	"syscall"
+	"time"
+	"unsafe"
+)
+
+// flock acquires an advisory lock on a file descriptor.
+func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error {
+	var t time.Time
+	for {
+		// If we're beyond our timeout then return an error.
+		// This can only occur after we've attempted a flock once.
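+		// t is recorded on the first pass, so the timeout is measured from
+		// the first attempt rather than per iteration.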
+		if t.IsZero() {
+			t = time.Now()
+		} else if timeout > 0 && time.Since(t) > timeout {
+			return ErrTimeout
+		}
+		flag := syscall.LOCK_SH
+		if exclusive {
+			flag = syscall.LOCK_EX
+		}
+
+		// Otherwise attempt to obtain an exclusive lock.
+		err := syscall.Flock(int(db.file.Fd()), flag|syscall.LOCK_NB)
+		if err == nil {
+			return nil
+		} else if err != syscall.EWOULDBLOCK {
+			return err
+		}
+
+		// Wait for a bit and try again.
+		time.Sleep(50 * time.Millisecond)
+	}
+}
+
+// funlock releases an advisory lock on a file descriptor.
+func funlock(db *DB) error {
+	return syscall.Flock(int(db.file.Fd()), syscall.LOCK_UN)
+}
+
+// mmap memory maps a DB's data file.
+func mmap(db *DB, sz int) error {
+	// Map the data file to memory.
+	b, err := syscall.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags)
+	if err != nil {
+		return err
+	}
+
+	// Advise the kernel that the mmap is accessed randomly.
+	if err := madvise(b, syscall.MADV_RANDOM); err != nil {
+		return fmt.Errorf("madvise: %s", err)
+	}
+
+	// Save the original byte slice and convert to a byte array pointer.
+	db.dataref = b
+	db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0]))
+	db.datasz = sz
+	return nil
+}
+
+// munmap unmaps a DB's data file from memory.
+func munmap(db *DB) error {
+	// Ignore the unmap if we have no mapped data.
+	if db.dataref == nil {
+		return nil
+	}
+
+	// Unmap using the original byte slice.
+	err := syscall.Munmap(db.dataref)
+	db.dataref = nil
+	db.data = nil
+	db.datasz = 0
+	return err
+}
+
+// NOTE: This function is copied from stdlib because it is not available on darwin.
+func madvise(b []byte, advice int) (err error) {
+	_, _, e1 := syscall.Syscall(syscall.SYS_MADVISE, uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), uintptr(advice))
+	if e1 != 0 {
+		err = e1
+	}
+	return
+}
diff --git a/vendor/github.com/boltdb/bolt/bolt_unix_solaris.go b/vendor/github.com/boltdb/bolt/bolt_unix_solaris.go
new file mode 100644
index 00000000..307bf2b3
--- /dev/null
+++ b/vendor/github.com/boltdb/bolt/bolt_unix_solaris.go
@@ -0,0 +1,90 @@
+package bolt
+
+import (
+	"fmt"
+	"os"
+	"syscall"
+	"time"
+	"unsafe"
+
+	"golang.org/x/sys/unix"
+)
+
+// flock acquires an advisory lock on a file descriptor.
+func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error {
+	var t time.Time
+	for {
+		// If we're beyond our timeout then return an error.
+		// This can only occur after we've attempted a flock once.
+		if t.IsZero() {
+			t = time.Now()
+		} else if timeout > 0 && time.Since(t) > timeout {
+			return ErrTimeout
+		}
+		var lock syscall.Flock_t
+		lock.Start = 0
+		lock.Len = 0
+		lock.Pid = 0
+		lock.Whence = 0
+		lock.Pid = 0
+		if exclusive {
+			lock.Type = syscall.F_WRLCK
+		} else {
+			lock.Type = syscall.F_RDLCK
+		}
+		err := syscall.FcntlFlock(db.file.Fd(), syscall.F_SETLK, &lock)
+		if err == nil {
+			return nil
+		} else if err != syscall.EAGAIN {
+			return err
+		}
+
+		// Wait for a bit and try again.
+		time.Sleep(50 * time.Millisecond)
+	}
+}
+
+// funlock releases an advisory lock on a file descriptor.
+func funlock(db *DB) error {
+	var lock syscall.Flock_t
+	lock.Start = 0
+	lock.Len = 0
+	lock.Type = syscall.F_UNLCK
+	lock.Whence = 0
+	return syscall.FcntlFlock(uintptr(db.file.Fd()), syscall.F_SETLK, &lock)
+}
+
+// mmap memory maps a DB's data file.
+func mmap(db *DB, sz int) error {
+	// Map the data file to memory.
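+	// The mapping is read-only (PROT_READ); Bolt performs writes through the
+	// file handle rather than through the map.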
+ b, err := unix.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags) + if err != nil { + return err + } + + // Advise the kernel that the mmap is accessed randomly. + if err := unix.Madvise(b, syscall.MADV_RANDOM); err != nil { + return fmt.Errorf("madvise: %s", err) + } + + // Save the original byte slice and convert to a byte array pointer. + db.dataref = b + db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0])) + db.datasz = sz + return nil +} + +// munmap unmaps a DB's data file from memory. +func munmap(db *DB) error { + // Ignore the unmap if we have no mapped data. + if db.dataref == nil { + return nil + } + + // Unmap using the original byte slice. + err := unix.Munmap(db.dataref) + db.dataref = nil + db.data = nil + db.datasz = 0 + return err +} diff --git a/vendor/github.com/boltdb/bolt/bolt_windows.go b/vendor/github.com/boltdb/bolt/bolt_windows.go new file mode 100644 index 00000000..d538e6af --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bolt_windows.go @@ -0,0 +1,144 @@ +package bolt + +import ( + "fmt" + "os" + "syscall" + "time" + "unsafe" +) + +// LockFileEx code derived from golang build filemutex_windows.go @ v1.5.1 +var ( + modkernel32 = syscall.NewLazyDLL("kernel32.dll") + procLockFileEx = modkernel32.NewProc("LockFileEx") + procUnlockFileEx = modkernel32.NewProc("UnlockFileEx") +) + +const ( + lockExt = ".lock" + + // see https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx + flagLockExclusive = 2 + flagLockFailImmediately = 1 + + // see https://msdn.microsoft.com/en-us/library/windows/desktop/ms681382(v=vs.85).aspx + errLockViolation syscall.Errno = 0x21 +) + +func lockFileEx(h syscall.Handle, flags, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) { + r, _, err := procLockFileEx.Call(uintptr(h), uintptr(flags), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol))) + if r == 0 { + return err + } + return nil +} + +func unlockFileEx(h syscall.Handle, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) { + r, _, err := procUnlockFileEx.Call(uintptr(h), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)), 0) + if r == 0 { + return err + } + return nil +} + +// fdatasync flushes written data to a file descriptor. +func fdatasync(db *DB) error { + return db.file.Sync() +} + +// flock acquires an advisory lock on a file descriptor. +func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error { + // Create a separate lock file on windows because a process + // cannot share an exclusive lock on the same file. This is + // needed during Tx.WriteTo(). + f, err := os.OpenFile(db.path+lockExt, os.O_CREATE, mode) + if err != nil { + return err + } + db.lockfile = f + + var t time.Time + for { + // If we're beyond our timeout then return an error. + // This can only occur after we've attempted a flock once. + if t.IsZero() { + t = time.Now() + } else if timeout > 0 && time.Since(t) > timeout { + return ErrTimeout + } + + var flag uint32 = flagLockFailImmediately + if exclusive { + flag |= flagLockExclusive + } + + err := lockFileEx(syscall.Handle(db.lockfile.Fd()), flag, 0, 1, 0, &syscall.Overlapped{}) + if err == nil { + return nil + } else if err != errLockViolation { + return err + } + + // Wait for a bit and try again. + time.Sleep(50 * time.Millisecond) + } +} + +// funlock releases an advisory lock on a file descriptor. 
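+// On Windows the advisory lock lives on the separate ".lock" file created
+// by flock, so funlock also closes that handle and removes the lock file.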
+func funlock(db *DB) error { + err := unlockFileEx(syscall.Handle(db.lockfile.Fd()), 0, 1, 0, &syscall.Overlapped{}) + db.lockfile.Close() + os.Remove(db.path+lockExt) + return err +} + +// mmap memory maps a DB's data file. +// Based on: https://github.com/edsrzf/mmap-go +func mmap(db *DB, sz int) error { + if !db.readOnly { + // Truncate the database to the size of the mmap. + if err := db.file.Truncate(int64(sz)); err != nil { + return fmt.Errorf("truncate: %s", err) + } + } + + // Open a file mapping handle. + sizelo := uint32(sz >> 32) + sizehi := uint32(sz) & 0xffffffff + h, errno := syscall.CreateFileMapping(syscall.Handle(db.file.Fd()), nil, syscall.PAGE_READONLY, sizelo, sizehi, nil) + if h == 0 { + return os.NewSyscallError("CreateFileMapping", errno) + } + + // Create the memory map. + addr, errno := syscall.MapViewOfFile(h, syscall.FILE_MAP_READ, 0, 0, uintptr(sz)) + if addr == 0 { + return os.NewSyscallError("MapViewOfFile", errno) + } + + // Close mapping handle. + if err := syscall.CloseHandle(syscall.Handle(h)); err != nil { + return os.NewSyscallError("CloseHandle", err) + } + + // Convert to a byte array. + db.data = ((*[maxMapSize]byte)(unsafe.Pointer(addr))) + db.datasz = sz + + return nil +} + +// munmap unmaps a pointer from a file. +// Based on: https://github.com/edsrzf/mmap-go +func munmap(db *DB) error { + if db.data == nil { + return nil + } + + addr := (uintptr)(unsafe.Pointer(&db.data[0])) + if err := syscall.UnmapViewOfFile(addr); err != nil { + return os.NewSyscallError("UnmapViewOfFile", err) + } + return nil +} diff --git a/vendor/github.com/boltdb/bolt/boltsync_unix.go b/vendor/github.com/boltdb/bolt/boltsync_unix.go new file mode 100644 index 00000000..f5044252 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/boltsync_unix.go @@ -0,0 +1,8 @@ +// +build !windows,!plan9,!linux,!openbsd + +package bolt + +// fdatasync flushes written data to a file descriptor. +func fdatasync(db *DB) error { + return db.file.Sync() +} diff --git a/vendor/github.com/boltdb/bolt/bucket.go b/vendor/github.com/boltdb/bolt/bucket.go new file mode 100644 index 00000000..d2f8c524 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bucket.go @@ -0,0 +1,748 @@ +package bolt + +import ( + "bytes" + "fmt" + "unsafe" +) + +const ( + // MaxKeySize is the maximum length of a key, in bytes. + MaxKeySize = 32768 + + // MaxValueSize is the maximum length of a value, in bytes. + MaxValueSize = (1 << 31) - 2 +) + +const ( + maxUint = ^uint(0) + minUint = 0 + maxInt = int(^uint(0) >> 1) + minInt = -maxInt - 1 +) + +const bucketHeaderSize = int(unsafe.Sizeof(bucket{})) + +const ( + minFillPercent = 0.1 + maxFillPercent = 1.0 +) + +// DefaultFillPercent is the percentage that split pages are filled. +// This value can be changed by setting Bucket.FillPercent. +const DefaultFillPercent = 0.5 + +// Bucket represents a collection of key/value pairs inside the database. +type Bucket struct { + *bucket + tx *Tx // the associated transaction + buckets map[string]*Bucket // subbucket cache + page *page // inline page reference + rootNode *node // materialized node for the root page. + nodes map[pgid]*node // node cache + + // Sets the threshold for filling nodes when they split. By default, + // the bucket will fill to 50% but it can be useful to increase this + // amount if you know that your write workloads are mostly append-only. + // + // This is non-persisted across transactions so it must be set in every Tx. + FillPercent float64 +} + +// bucket represents the on-file representation of a bucket. 
+// This is stored as the "value" of a bucket key. If the bucket is small enough, +// then its root page can be stored inline in the "value", after the bucket +// header. In the case of inline buckets, the "root" will be 0. +type bucket struct { + root pgid // page id of the bucket's root-level page + sequence uint64 // monotonically incrementing, used by NextSequence() +} + +// newBucket returns a new bucket associated with a transaction. +func newBucket(tx *Tx) Bucket { + var b = Bucket{tx: tx, FillPercent: DefaultFillPercent} + if tx.writable { + b.buckets = make(map[string]*Bucket) + b.nodes = make(map[pgid]*node) + } + return b +} + +// Tx returns the tx of the bucket. +func (b *Bucket) Tx() *Tx { + return b.tx +} + +// Root returns the root of the bucket. +func (b *Bucket) Root() pgid { + return b.root +} + +// Writable returns whether the bucket is writable. +func (b *Bucket) Writable() bool { + return b.tx.writable +} + +// Cursor creates a cursor associated with the bucket. +// The cursor is only valid as long as the transaction is open. +// Do not use a cursor after the transaction is closed. +func (b *Bucket) Cursor() *Cursor { + // Update transaction statistics. + b.tx.stats.CursorCount++ + + // Allocate and return a cursor. + return &Cursor{ + bucket: b, + stack: make([]elemRef, 0), + } +} + +// Bucket retrieves a nested bucket by name. +// Returns nil if the bucket does not exist. +// The bucket instance is only valid for the lifetime of the transaction. +func (b *Bucket) Bucket(name []byte) *Bucket { + if b.buckets != nil { + if child := b.buckets[string(name)]; child != nil { + return child + } + } + + // Move cursor to key. + c := b.Cursor() + k, v, flags := c.seek(name) + + // Return nil if the key doesn't exist or it is not a bucket. + if !bytes.Equal(name, k) || (flags&bucketLeafFlag) == 0 { + return nil + } + + // Otherwise create a bucket and cache it. + var child = b.openBucket(v) + if b.buckets != nil { + b.buckets[string(name)] = child + } + + return child +} + +// Helper method that re-interprets a sub-bucket value +// from a parent into a Bucket +func (b *Bucket) openBucket(value []byte) *Bucket { + var child = newBucket(b.tx) + + // If this is a writable transaction then we need to copy the bucket entry. + // Read-only transactions can point directly at the mmap entry. + if b.tx.writable { + child.bucket = &bucket{} + *child.bucket = *(*bucket)(unsafe.Pointer(&value[0])) + } else { + child.bucket = (*bucket)(unsafe.Pointer(&value[0])) + } + + // Save a reference to the inline page if the bucket is inline. + if child.root == 0 { + child.page = (*page)(unsafe.Pointer(&value[bucketHeaderSize])) + } + + return &child +} + +// CreateBucket creates a new bucket at the given key and returns the new bucket. +// Returns an error if the key already exists, if the bucket name is blank, or if the bucket name is too long. +// The bucket instance is only valid for the lifetime of the transaction. +func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) { + if b.tx.db == nil { + return nil, ErrTxClosed + } else if !b.tx.writable { + return nil, ErrTxNotWritable + } else if len(key) == 0 { + return nil, ErrBucketNameRequired + } + + // Move cursor to correct position. + c := b.Cursor() + k, _, flags := c.seek(key) + + // Return an error if there is an existing key. + if bytes.Equal(key, k) { + if (flags & bucketLeafFlag) != 0 { + return nil, ErrBucketExists + } else { + return nil, ErrIncompatibleValue + } + } + + // Create empty, inline bucket. 
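+	// The bucket starts life inline: root stays 0 and write() below
+	// serializes the empty leaf node directly into the parent's value.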
+	var bucket = Bucket{
+		bucket:      &bucket{},
+		rootNode:    &node{isLeaf: true},
+		FillPercent: DefaultFillPercent,
+	}
+	var value = bucket.write()
+
+	// Insert into node.
+	key = cloneBytes(key)
+	c.node().put(key, key, value, 0, bucketLeafFlag)
+
+	// Since subbuckets are not allowed on inline buckets, we need to
+	// dereference the inline page, if it exists. This will cause the bucket
+	// to be treated as a regular, non-inline bucket for the rest of the tx.
+	b.page = nil
+
+	return b.Bucket(key), nil
+}
+
+// CreateBucketIfNotExists creates a new bucket if it doesn't already exist and returns a reference to it.
+// Returns an error if the bucket name is blank, or if the bucket name is too long.
+// The bucket instance is only valid for the lifetime of the transaction.
+func (b *Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) {
+	child, err := b.CreateBucket(key)
+	if err == ErrBucketExists {
+		return b.Bucket(key), nil
+	} else if err != nil {
+		return nil, err
+	}
+	return child, nil
+}
+
+// DeleteBucket deletes a bucket at the given key.
+// Returns an error if the bucket does not exist, or if the key represents a non-bucket value.
+func (b *Bucket) DeleteBucket(key []byte) error {
+	if b.tx.db == nil {
+		return ErrTxClosed
+	} else if !b.Writable() {
+		return ErrTxNotWritable
+	}
+
+	// Move cursor to correct position.
+	c := b.Cursor()
+	k, _, flags := c.seek(key)
+
+	// Return an error if bucket doesn't exist or is not a bucket.
+	if !bytes.Equal(key, k) {
+		return ErrBucketNotFound
+	} else if (flags & bucketLeafFlag) == 0 {
+		return ErrIncompatibleValue
+	}
+
+	// Recursively delete all child buckets.
+	child := b.Bucket(key)
+	err := child.ForEach(func(k, v []byte) error {
+		if v == nil {
+			if err := child.DeleteBucket(k); err != nil {
+				return fmt.Errorf("delete bucket: %s", err)
+			}
+		}
+		return nil
+	})
+	if err != nil {
+		return err
+	}
+
+	// Remove cached copy.
+	delete(b.buckets, string(key))
+
+	// Release all bucket pages to freelist.
+	child.nodes = nil
+	child.rootNode = nil
+	child.free()
+
+	// Delete the node if we have a matching key.
+	c.node().del(key)
+
+	return nil
+}
+
+// Get retrieves the value for a key in the bucket.
+// Returns a nil value if the key does not exist or if the key is a nested bucket.
+// The returned value is only valid for the life of the transaction.
+func (b *Bucket) Get(key []byte) []byte {
+	k, v, flags := b.Cursor().seek(key)
+
+	// Return nil if this is a bucket.
+	if (flags & bucketLeafFlag) != 0 {
+		return nil
+	}
+
+	// If our target node isn't the same key as what's passed in then return nil.
+	if !bytes.Equal(key, k) {
+		return nil
+	}
+	return v
+}
+
+// Put sets the value for a key in the bucket.
+// If the key exists then its previous value will be overwritten.
+// Supplied value must remain valid for the life of the transaction.
+// Returns an error if the bucket was created from a read-only transaction, if the key is blank, if the key is too large, or if the value is too large.
+func (b *Bucket) Put(key []byte, value []byte) error {
+	if b.tx.db == nil {
+		return ErrTxClosed
+	} else if !b.Writable() {
+		return ErrTxNotWritable
+	} else if len(key) == 0 {
+		return ErrKeyRequired
+	} else if len(key) > MaxKeySize {
+		return ErrKeyTooLarge
+	} else if int64(len(value)) > MaxValueSize {
+		return ErrValueTooLarge
+	}
+
+	// Move cursor to correct position.
+	c := b.Cursor()
+	k, _, flags := c.seek(key)
+
+	// Return an error if there is an existing key with a bucket value.
+ if bytes.Equal(key, k) && (flags&bucketLeafFlag) != 0 { + return ErrIncompatibleValue + } + + // Insert into node. + key = cloneBytes(key) + c.node().put(key, key, value, 0, 0) + + return nil +} + +// Delete removes a key from the bucket. +// If the key does not exist then nothing is done and a nil error is returned. +// Returns an error if the bucket was created from a read-only transaction. +func (b *Bucket) Delete(key []byte) error { + if b.tx.db == nil { + return ErrTxClosed + } else if !b.Writable() { + return ErrTxNotWritable + } + + // Move cursor to correct position. + c := b.Cursor() + _, _, flags := c.seek(key) + + // Return an error if there is already existing bucket value. + if (flags & bucketLeafFlag) != 0 { + return ErrIncompatibleValue + } + + // Delete the node if we have a matching key. + c.node().del(key) + + return nil +} + +// NextSequence returns an autoincrementing integer for the bucket. +func (b *Bucket) NextSequence() (uint64, error) { + if b.tx.db == nil { + return 0, ErrTxClosed + } else if !b.Writable() { + return 0, ErrTxNotWritable + } + + // Materialize the root node if it hasn't been already so that the + // bucket will be saved during commit. + if b.rootNode == nil { + _ = b.node(b.root, nil) + } + + // Increment and return the sequence. + b.bucket.sequence++ + return b.bucket.sequence, nil +} + +// ForEach executes a function for each key/value pair in a bucket. +// If the provided function returns an error then the iteration is stopped and +// the error is returned to the caller. The provided function must not modify +// the bucket; this will result in undefined behavior. +func (b *Bucket) ForEach(fn func(k, v []byte) error) error { + if b.tx.db == nil { + return ErrTxClosed + } + c := b.Cursor() + for k, v := c.First(); k != nil; k, v = c.Next() { + if err := fn(k, v); err != nil { + return err + } + } + return nil +} + +// Stat returns stats on a bucket. +func (b *Bucket) Stats() BucketStats { + var s, subStats BucketStats + pageSize := b.tx.db.pageSize + s.BucketN += 1 + if b.root == 0 { + s.InlineBucketN += 1 + } + b.forEachPage(func(p *page, depth int) { + if (p.flags & leafPageFlag) != 0 { + s.KeyN += int(p.count) + + // used totals the used bytes for the page + used := pageHeaderSize + + if p.count != 0 { + // If page has any elements, add all element headers. + used += leafPageElementSize * int(p.count-1) + + // Add all element key, value sizes. + // The computation takes advantage of the fact that the position + // of the last element's key/value equals to the total of the sizes + // of all previous elements' keys and values. + // It also includes the last element's header. + lastElement := p.leafPageElement(p.count - 1) + used += int(lastElement.pos + lastElement.ksize + lastElement.vsize) + } + + if b.root == 0 { + // For inlined bucket just update the inline stats + s.InlineBucketInuse += used + } else { + // For non-inlined bucket update all the leaf stats + s.LeafPageN++ + s.LeafInuse += used + s.LeafOverflowN += int(p.overflow) + + // Collect stats from sub-buckets. + // Do that by iterating over all element headers + // looking for the ones with the bucketLeafFlag. + for i := uint16(0); i < p.count; i++ { + e := p.leafPageElement(i) + if (e.flags & bucketLeafFlag) != 0 { + // For any bucket element, open the element value + // and recursively call Stats on the contained bucket. 
+ subStats.Add(b.openBucket(e.value()).Stats()) + } + } + } + } else if (p.flags & branchPageFlag) != 0 { + s.BranchPageN++ + lastElement := p.branchPageElement(p.count - 1) + + // used totals the used bytes for the page + // Add header and all element headers. + used := pageHeaderSize + (branchPageElementSize * int(p.count-1)) + + // Add size of all keys and values. + // Again, use the fact that last element's position equals to + // the total of key, value sizes of all previous elements. + used += int(lastElement.pos + lastElement.ksize) + s.BranchInuse += used + s.BranchOverflowN += int(p.overflow) + } + + // Keep track of maximum page depth. + if depth+1 > s.Depth { + s.Depth = (depth + 1) + } + }) + + // Alloc stats can be computed from page counts and pageSize. + s.BranchAlloc = (s.BranchPageN + s.BranchOverflowN) * pageSize + s.LeafAlloc = (s.LeafPageN + s.LeafOverflowN) * pageSize + + // Add the max depth of sub-buckets to get total nested depth. + s.Depth += subStats.Depth + // Add the stats for all sub-buckets + s.Add(subStats) + return s +} + +// forEachPage iterates over every page in a bucket, including inline pages. +func (b *Bucket) forEachPage(fn func(*page, int)) { + // If we have an inline page then just use that. + if b.page != nil { + fn(b.page, 0) + return + } + + // Otherwise traverse the page hierarchy. + b.tx.forEachPage(b.root, 0, fn) +} + +// forEachPageNode iterates over every page (or node) in a bucket. +// This also includes inline pages. +func (b *Bucket) forEachPageNode(fn func(*page, *node, int)) { + // If we have an inline page or root node then just use that. + if b.page != nil { + fn(b.page, nil, 0) + return + } + b._forEachPageNode(b.root, 0, fn) +} + +func (b *Bucket) _forEachPageNode(pgid pgid, depth int, fn func(*page, *node, int)) { + var p, n = b.pageNode(pgid) + + // Execute function. + fn(p, n, depth) + + // Recursively loop over children. + if p != nil { + if (p.flags & branchPageFlag) != 0 { + for i := 0; i < int(p.count); i++ { + elem := p.branchPageElement(uint16(i)) + b._forEachPageNode(elem.pgid, depth+1, fn) + } + } + } else { + if !n.isLeaf { + for _, inode := range n.inodes { + b._forEachPageNode(inode.pgid, depth+1, fn) + } + } + } +} + +// spill writes all the nodes for this bucket to dirty pages. +func (b *Bucket) spill() error { + // Spill all child buckets first. + for name, child := range b.buckets { + // If the child bucket is small enough and it has no child buckets then + // write it inline into the parent bucket's page. Otherwise spill it + // like a normal bucket and make the parent value a pointer to the page. + var value []byte + if child.inlineable() { + child.free() + value = child.write() + } else { + if err := child.spill(); err != nil { + return err + } + + // Update the child bucket header in this bucket. + value = make([]byte, unsafe.Sizeof(bucket{})) + var bucket = (*bucket)(unsafe.Pointer(&value[0])) + *bucket = *child.bucket + } + + // Skip writing the bucket if there are no materialized nodes. + if child.rootNode == nil { + continue + } + + // Update parent node. + var c = b.Cursor() + k, _, flags := c.seek([]byte(name)) + if !bytes.Equal([]byte(name), k) { + panic(fmt.Sprintf("misplaced bucket header: %x -> %x", []byte(name), k)) + } + if flags&bucketLeafFlag == 0 { + panic(fmt.Sprintf("unexpected bucket header flag: %x", flags)) + } + c.node().put([]byte(name), []byte(name), value, 0, bucketLeafFlag) + } + + // Ignore if there's not a materialized root node. 
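+	// (A nil rootNode means no node in this bucket was materialized, so
+	// nothing here was modified during this transaction.)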
+ if b.rootNode == nil { + return nil + } + + // Spill nodes. + if err := b.rootNode.spill(); err != nil { + return err + } + b.rootNode = b.rootNode.root() + + // Update the root node for this bucket. + if b.rootNode.pgid >= b.tx.meta.pgid { + panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", b.rootNode.pgid, b.tx.meta.pgid)) + } + b.root = b.rootNode.pgid + + return nil +} + +// inlineable returns true if a bucket is small enough to be written inline +// and if it contains no subbuckets. Otherwise returns false. +func (b *Bucket) inlineable() bool { + var n = b.rootNode + + // Bucket must only contain a single leaf node. + if n == nil || !n.isLeaf { + return false + } + + // Bucket is not inlineable if it contains subbuckets or if it goes beyond + // our threshold for inline bucket size. + var size = pageHeaderSize + for _, inode := range n.inodes { + size += leafPageElementSize + len(inode.key) + len(inode.value) + + if inode.flags&bucketLeafFlag != 0 { + return false + } else if size > b.maxInlineBucketSize() { + return false + } + } + + return true +} + +// Returns the maximum total size of a bucket to make it a candidate for inlining. +func (b *Bucket) maxInlineBucketSize() int { + return b.tx.db.pageSize / 4 +} + +// write allocates and writes a bucket to a byte slice. +func (b *Bucket) write() []byte { + // Allocate the appropriate size. + var n = b.rootNode + var value = make([]byte, bucketHeaderSize+n.size()) + + // Write a bucket header. + var bucket = (*bucket)(unsafe.Pointer(&value[0])) + *bucket = *b.bucket + + // Convert byte slice to a fake page and write the root node. + var p = (*page)(unsafe.Pointer(&value[bucketHeaderSize])) + n.write(p) + + return value +} + +// rebalance attempts to balance all nodes. +func (b *Bucket) rebalance() { + for _, n := range b.nodes { + n.rebalance() + } + for _, child := range b.buckets { + child.rebalance() + } +} + +// node creates a node from a page and associates it with a given parent. +func (b *Bucket) node(pgid pgid, parent *node) *node { + _assert(b.nodes != nil, "nodes map expected") + + // Retrieve node if it's already been created. + if n := b.nodes[pgid]; n != nil { + return n + } + + // Otherwise create a node and cache it. + n := &node{bucket: b, parent: parent} + if parent == nil { + b.rootNode = n + } else { + parent.children = append(parent.children, n) + } + + // Use the inline page if this is an inline bucket. + var p = b.page + if p == nil { + p = b.tx.page(pgid) + } + + // Read the page into the node and cache it. + n.read(p) + b.nodes[pgid] = n + + // Update statistics. + b.tx.stats.NodeCount++ + + return n +} + +// free recursively frees all pages in the bucket. +func (b *Bucket) free() { + if b.root == 0 { + return + } + + var tx = b.tx + b.forEachPageNode(func(p *page, n *node, _ int) { + if p != nil { + tx.db.freelist.free(tx.meta.txid, p) + } else { + n.free() + } + }) + b.root = 0 +} + +// dereference removes all references to the old mmap. +func (b *Bucket) dereference() { + if b.rootNode != nil { + b.rootNode.root().dereference() + } + + for _, child := range b.buckets { + child.dereference() + } +} + +// pageNode returns the in-memory node, if it exists. +// Otherwise returns the underlying page. +func (b *Bucket) pageNode(id pgid) (*page, *node) { + // Inline buckets have a fake page embedded in their value so treat them + // differently. We'll return the rootNode (if available) or the fake page. 
+ if b.root == 0 { + if id != 0 { + panic(fmt.Sprintf("inline bucket non-zero page access(2): %d != 0", id)) + } + if b.rootNode != nil { + return nil, b.rootNode + } + return b.page, nil + } + + // Check the node cache for non-inline buckets. + if b.nodes != nil { + if n := b.nodes[id]; n != nil { + return nil, n + } + } + + // Finally lookup the page from the transaction if no node is materialized. + return b.tx.page(id), nil +} + +// BucketStats records statistics about resources used by a bucket. +type BucketStats struct { + // Page count statistics. + BranchPageN int // number of logical branch pages + BranchOverflowN int // number of physical branch overflow pages + LeafPageN int // number of logical leaf pages + LeafOverflowN int // number of physical leaf overflow pages + + // Tree statistics. + KeyN int // number of keys/value pairs + Depth int // number of levels in B+tree + + // Page size utilization. + BranchAlloc int // bytes allocated for physical branch pages + BranchInuse int // bytes actually used for branch data + LeafAlloc int // bytes allocated for physical leaf pages + LeafInuse int // bytes actually used for leaf data + + // Bucket statistics + BucketN int // total number of buckets including the top bucket + InlineBucketN int // total number on inlined buckets + InlineBucketInuse int // bytes used for inlined buckets (also accounted for in LeafInuse) +} + +func (s *BucketStats) Add(other BucketStats) { + s.BranchPageN += other.BranchPageN + s.BranchOverflowN += other.BranchOverflowN + s.LeafPageN += other.LeafPageN + s.LeafOverflowN += other.LeafOverflowN + s.KeyN += other.KeyN + if s.Depth < other.Depth { + s.Depth = other.Depth + } + s.BranchAlloc += other.BranchAlloc + s.BranchInuse += other.BranchInuse + s.LeafAlloc += other.LeafAlloc + s.LeafInuse += other.LeafInuse + + s.BucketN += other.BucketN + s.InlineBucketN += other.InlineBucketN + s.InlineBucketInuse += other.InlineBucketInuse +} + +// cloneBytes returns a copy of a given slice. +func cloneBytes(v []byte) []byte { + var clone = make([]byte, len(v)) + copy(clone, v) + return clone +} diff --git a/vendor/github.com/boltdb/bolt/cursor.go b/vendor/github.com/boltdb/bolt/cursor.go new file mode 100644 index 00000000..1be9f35e --- /dev/null +++ b/vendor/github.com/boltdb/bolt/cursor.go @@ -0,0 +1,400 @@ +package bolt + +import ( + "bytes" + "fmt" + "sort" +) + +// Cursor represents an iterator that can traverse over all key/value pairs in a bucket in sorted order. +// Cursors see nested buckets with value == nil. +// Cursors can be obtained from a transaction and are valid as long as the transaction is open. +// +// Keys and values returned from the cursor are only valid for the life of the transaction. +// +// Changing data while traversing with a cursor may cause it to be invalidated +// and return unexpected keys and/or values. You must reposition your cursor +// after mutating data. +type Cursor struct { + bucket *Bucket + stack []elemRef +} + +// Bucket returns the bucket that this cursor was created from. +func (c *Cursor) Bucket() *Bucket { + return c.bucket +} + +// First moves the cursor to the first item in the bucket and returns its key and value. +// If the bucket is empty then a nil key and value are returned. +// The returned key and value are only valid for the life of the transaction. 
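+//
+// A minimal iteration sketch (assuming an open transaction tx and an
+// existing bucket; the "widgets" name is illustrative only):
+//
+//	c := tx.Bucket([]byte("widgets")).Cursor()
+//	for k, v := c.First(); k != nil; k, v = c.Next() {
+//		fmt.Printf("%s=%s\n", k, v)
+//	}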
+func (c *Cursor) First() (key []byte, value []byte) { + _assert(c.bucket.tx.db != nil, "tx closed") + c.stack = c.stack[:0] + p, n := c.bucket.pageNode(c.bucket.root) + c.stack = append(c.stack, elemRef{page: p, node: n, index: 0}) + c.first() + + // If we land on an empty page then move to the next value. + // https://github.com/boltdb/bolt/issues/450 + if c.stack[len(c.stack)-1].count() == 0 { + c.next() + } + + k, v, flags := c.keyValue() + if (flags & uint32(bucketLeafFlag)) != 0 { + return k, nil + } + return k, v + +} + +// Last moves the cursor to the last item in the bucket and returns its key and value. +// If the bucket is empty then a nil key and value are returned. +// The returned key and value are only valid for the life of the transaction. +func (c *Cursor) Last() (key []byte, value []byte) { + _assert(c.bucket.tx.db != nil, "tx closed") + c.stack = c.stack[:0] + p, n := c.bucket.pageNode(c.bucket.root) + ref := elemRef{page: p, node: n} + ref.index = ref.count() - 1 + c.stack = append(c.stack, ref) + c.last() + k, v, flags := c.keyValue() + if (flags & uint32(bucketLeafFlag)) != 0 { + return k, nil + } + return k, v +} + +// Next moves the cursor to the next item in the bucket and returns its key and value. +// If the cursor is at the end of the bucket then a nil key and value are returned. +// The returned key and value are only valid for the life of the transaction. +func (c *Cursor) Next() (key []byte, value []byte) { + _assert(c.bucket.tx.db != nil, "tx closed") + k, v, flags := c.next() + if (flags & uint32(bucketLeafFlag)) != 0 { + return k, nil + } + return k, v +} + +// Prev moves the cursor to the previous item in the bucket and returns its key and value. +// If the cursor is at the beginning of the bucket then a nil key and value are returned. +// The returned key and value are only valid for the life of the transaction. +func (c *Cursor) Prev() (key []byte, value []byte) { + _assert(c.bucket.tx.db != nil, "tx closed") + + // Attempt to move back one element until we're successful. + // Move up the stack as we hit the beginning of each page in our stack. + for i := len(c.stack) - 1; i >= 0; i-- { + elem := &c.stack[i] + if elem.index > 0 { + elem.index-- + break + } + c.stack = c.stack[:i] + } + + // If we've hit the end then return nil. + if len(c.stack) == 0 { + return nil, nil + } + + // Move down the stack to find the last element of the last leaf under this branch. + c.last() + k, v, flags := c.keyValue() + if (flags & uint32(bucketLeafFlag)) != 0 { + return k, nil + } + return k, v +} + +// Seek moves the cursor to a given key and returns it. +// If the key does not exist then the next key is used. If no keys +// follow, a nil key is returned. +// The returned key and value are only valid for the life of the transaction. +func (c *Cursor) Seek(seek []byte) (key []byte, value []byte) { + k, v, flags := c.seek(seek) + + // If we ended up after the last element of a page then move to the next one. + if ref := &c.stack[len(c.stack)-1]; ref.index >= ref.count() { + k, v, flags = c.next() + } + + if k == nil { + return nil, nil + } else if (flags & uint32(bucketLeafFlag)) != 0 { + return k, nil + } + return k, v +} + +// Delete removes the current key/value under the cursor from the bucket. +// Delete fails if current key/value is a bucket or if the transaction is not writable. 
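+//
+// A hedged usage sketch (assuming a writable transaction tx; the bucket
+// and key names are illustrative only): position the cursor on the key,
+// then delete in place:
+//
+//	c := tx.Bucket([]byte("widgets")).Cursor()
+//	if k, _ := c.Seek([]byte("foo")); bytes.Equal(k, []byte("foo")) {
+//		if err := c.Delete(); err != nil {
+//			return err
+//		}
+//	}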
+func (c *Cursor) Delete() error { + if c.bucket.tx.db == nil { + return ErrTxClosed + } else if !c.bucket.Writable() { + return ErrTxNotWritable + } + + key, _, flags := c.keyValue() + // Return an error if current value is a bucket. + if (flags & bucketLeafFlag) != 0 { + return ErrIncompatibleValue + } + c.node().del(key) + + return nil +} + +// seek moves the cursor to a given key and returns it. +// If the key does not exist then the next key is used. +func (c *Cursor) seek(seek []byte) (key []byte, value []byte, flags uint32) { + _assert(c.bucket.tx.db != nil, "tx closed") + + // Start from root page/node and traverse to correct page. + c.stack = c.stack[:0] + c.search(seek, c.bucket.root) + ref := &c.stack[len(c.stack)-1] + + // If the cursor is pointing to the end of page/node then return nil. + if ref.index >= ref.count() { + return nil, nil, 0 + } + + // If this is a bucket then return a nil value. + return c.keyValue() +} + +// first moves the cursor to the first leaf element under the last page in the stack. +func (c *Cursor) first() { + for { + // Exit when we hit a leaf page. + var ref = &c.stack[len(c.stack)-1] + if ref.isLeaf() { + break + } + + // Keep adding pages pointing to the first element to the stack. + var pgid pgid + if ref.node != nil { + pgid = ref.node.inodes[ref.index].pgid + } else { + pgid = ref.page.branchPageElement(uint16(ref.index)).pgid + } + p, n := c.bucket.pageNode(pgid) + c.stack = append(c.stack, elemRef{page: p, node: n, index: 0}) + } +} + +// last moves the cursor to the last leaf element under the last page in the stack. +func (c *Cursor) last() { + for { + // Exit when we hit a leaf page. + ref := &c.stack[len(c.stack)-1] + if ref.isLeaf() { + break + } + + // Keep adding pages pointing to the last element in the stack. + var pgid pgid + if ref.node != nil { + pgid = ref.node.inodes[ref.index].pgid + } else { + pgid = ref.page.branchPageElement(uint16(ref.index)).pgid + } + p, n := c.bucket.pageNode(pgid) + + var nextRef = elemRef{page: p, node: n} + nextRef.index = nextRef.count() - 1 + c.stack = append(c.stack, nextRef) + } +} + +// next moves to the next leaf element and returns the key and value. +// If the cursor is at the last leaf element then it stays there and returns nil. +func (c *Cursor) next() (key []byte, value []byte, flags uint32) { + for { + // Attempt to move over one element until we're successful. + // Move up the stack as we hit the end of each page in our stack. + var i int + for i = len(c.stack) - 1; i >= 0; i-- { + elem := &c.stack[i] + if elem.index < elem.count()-1 { + elem.index++ + break + } + } + + // If we've hit the root page then stop and return. This will leave the + // cursor on the last element of the last page. + if i == -1 { + return nil, nil, 0 + } + + // Otherwise start from where we left off in the stack and find the + // first element of the first leaf page. + c.stack = c.stack[:i+1] + c.first() + + // If this is an empty page then restart and move back up the stack. + // https://github.com/boltdb/bolt/issues/450 + if c.stack[len(c.stack)-1].count() == 0 { + continue + } + + return c.keyValue() + } +} + +// search recursively performs a binary search against a given page/node until it finds a given key. 
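+// Every page/node visited on the way down is pushed onto c.stack so that
+// Next/Prev can resume the traversal from the leaf without re-searching.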
+func (c *Cursor) search(key []byte, pgid pgid) { + p, n := c.bucket.pageNode(pgid) + if p != nil && (p.flags&(branchPageFlag|leafPageFlag)) == 0 { + panic(fmt.Sprintf("invalid page type: %d: %x", p.id, p.flags)) + } + e := elemRef{page: p, node: n} + c.stack = append(c.stack, e) + + // If we're on a leaf page/node then find the specific node. + if e.isLeaf() { + c.nsearch(key) + return + } + + if n != nil { + c.searchNode(key, n) + return + } + c.searchPage(key, p) +} + +func (c *Cursor) searchNode(key []byte, n *node) { + var exact bool + index := sort.Search(len(n.inodes), func(i int) bool { + // TODO(benbjohnson): Optimize this range search. It's a bit hacky right now. + // sort.Search() finds the lowest index where f() != -1 but we need the highest index. + ret := bytes.Compare(n.inodes[i].key, key) + if ret == 0 { + exact = true + } + return ret != -1 + }) + if !exact && index > 0 { + index-- + } + c.stack[len(c.stack)-1].index = index + + // Recursively search to the next page. + c.search(key, n.inodes[index].pgid) +} + +func (c *Cursor) searchPage(key []byte, p *page) { + // Binary search for the correct range. + inodes := p.branchPageElements() + + var exact bool + index := sort.Search(int(p.count), func(i int) bool { + // TODO(benbjohnson): Optimize this range search. It's a bit hacky right now. + // sort.Search() finds the lowest index where f() != -1 but we need the highest index. + ret := bytes.Compare(inodes[i].key(), key) + if ret == 0 { + exact = true + } + return ret != -1 + }) + if !exact && index > 0 { + index-- + } + c.stack[len(c.stack)-1].index = index + + // Recursively search to the next page. + c.search(key, inodes[index].pgid) +} + +// nsearch searches the leaf node on the top of the stack for a key. +func (c *Cursor) nsearch(key []byte) { + e := &c.stack[len(c.stack)-1] + p, n := e.page, e.node + + // If we have a node then search its inodes. + if n != nil { + index := sort.Search(len(n.inodes), func(i int) bool { + return bytes.Compare(n.inodes[i].key, key) != -1 + }) + e.index = index + return + } + + // If we have a page then search its leaf elements. + inodes := p.leafPageElements() + index := sort.Search(int(p.count), func(i int) bool { + return bytes.Compare(inodes[i].key(), key) != -1 + }) + e.index = index +} + +// keyValue returns the key and value of the current leaf element. +func (c *Cursor) keyValue() ([]byte, []byte, uint32) { + ref := &c.stack[len(c.stack)-1] + if ref.count() == 0 || ref.index >= ref.count() { + return nil, nil, 0 + } + + // Retrieve value from node. + if ref.node != nil { + inode := &ref.node.inodes[ref.index] + return inode.key, inode.value, inode.flags + } + + // Or retrieve value from page. + elem := ref.page.leafPageElement(uint16(ref.index)) + return elem.key(), elem.value(), elem.flags +} + +// node returns the node that the cursor is currently positioned on. +func (c *Cursor) node() *node { + _assert(len(c.stack) > 0, "accessing a node with a zero-length cursor stack") + + // If the top of the stack is a leaf node then just return it. + if ref := &c.stack[len(c.stack)-1]; ref.node != nil && ref.isLeaf() { + return ref.node + } + + // Start from root and traverse down the hierarchy. 
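+	// Levels that are still raw pages are materialized into nodes on the
+	// way down (bucket.node via childAt), so writes land on cached nodes.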
+ var n = c.stack[0].node + if n == nil { + n = c.bucket.node(c.stack[0].page.id, nil) + } + for _, ref := range c.stack[:len(c.stack)-1] { + _assert(!n.isLeaf, "expected branch node") + n = n.childAt(int(ref.index)) + } + _assert(n.isLeaf, "expected leaf node") + return n +} + +// elemRef represents a reference to an element on a given page/node. +type elemRef struct { + page *page + node *node + index int +} + +// isLeaf returns whether the ref is pointing at a leaf page/node. +func (r *elemRef) isLeaf() bool { + if r.node != nil { + return r.node.isLeaf + } + return (r.page.flags & leafPageFlag) != 0 +} + +// count returns the number of inodes or page elements. +func (r *elemRef) count() int { + if r.node != nil { + return len(r.node.inodes) + } + return int(r.page.count) +} diff --git a/vendor/github.com/boltdb/bolt/db.go b/vendor/github.com/boltdb/bolt/db.go new file mode 100644 index 00000000..501d36aa --- /dev/null +++ b/vendor/github.com/boltdb/bolt/db.go @@ -0,0 +1,993 @@ +package bolt + +import ( + "errors" + "fmt" + "hash/fnv" + "log" + "os" + "runtime" + "runtime/debug" + "strings" + "sync" + "time" + "unsafe" +) + +// The largest step that can be taken when remapping the mmap. +const maxMmapStep = 1 << 30 // 1GB + +// The data file format version. +const version = 2 + +// Represents a marker value to indicate that a file is a Bolt DB. +const magic uint32 = 0xED0CDAED + +// IgnoreNoSync specifies whether the NoSync field of a DB is ignored when +// syncing changes to a file. This is required as some operating systems, +// such as OpenBSD, do not have a unified buffer cache (UBC) and writes +// must be synchronized using the msync(2) syscall. +const IgnoreNoSync = runtime.GOOS == "openbsd" + +// Default values if not set in a DB instance. +const ( + DefaultMaxBatchSize int = 1000 + DefaultMaxBatchDelay = 10 * time.Millisecond + DefaultAllocSize = 16 * 1024 * 1024 +) + +// DB represents a collection of buckets persisted to a file on disk. +// All data access is performed through transactions which can be obtained through the DB. +// All the functions on DB will return a ErrDatabaseNotOpen if accessed before Open() is called. +type DB struct { + // When enabled, the database will perform a Check() after every commit. + // A panic is issued if the database is in an inconsistent state. This + // flag has a large performance impact so it should only be used for + // debugging purposes. + StrictMode bool + + // Setting the NoSync flag will cause the database to skip fsync() + // calls after each commit. This can be useful when bulk loading data + // into a database and you can restart the bulk load in the event of + // a system failure or database corruption. Do not set this flag for + // normal use. + // + // If the package global IgnoreNoSync constant is true, this value is + // ignored. See the comment on that constant for more details. + // + // THIS IS UNSAFE. PLEASE USE WITH CAUTION. + NoSync bool + + // When true, skips the truncate call when growing the database. + // Setting this to true is only safe on non-ext3/ext4 systems. + // Skipping truncation avoids preallocation of hard drive space and + // bypasses a truncate() and fsync() syscall on remapping. + // + // https://github.com/boltdb/bolt/issues/284 + NoGrowSync bool + + // If you want to read the entire database fast, you can set MmapFlag to + // syscall.MAP_POPULATE on Linux 2.6.23+ for sequential read-ahead. + MmapFlags int + + // MaxBatchSize is the maximum size of a batch. 
Default value is + // copied from DefaultMaxBatchSize in Open. + // + // If <=0, disables batching. + // + // Do not change concurrently with calls to Batch. + MaxBatchSize int + + // MaxBatchDelay is the maximum delay before a batch starts. + // Default value is copied from DefaultMaxBatchDelay in Open. + // + // If <=0, effectively disables batching. + // + // Do not change concurrently with calls to Batch. + MaxBatchDelay time.Duration + + // AllocSize is the amount of space allocated when the database + // needs to create new pages. This is done to amortize the cost + // of truncate() and fsync() when growing the data file. + AllocSize int + + path string + file *os.File + lockfile *os.File // windows only + dataref []byte // mmap'ed readonly, write throws SEGV + data *[maxMapSize]byte + datasz int + filesz int // current on disk file size + meta0 *meta + meta1 *meta + pageSize int + opened bool + rwtx *Tx + txs []*Tx + freelist *freelist + stats Stats + + batchMu sync.Mutex + batch *batch + + rwlock sync.Mutex // Allows only one writer at a time. + metalock sync.Mutex // Protects meta page access. + mmaplock sync.RWMutex // Protects mmap access during remapping. + statlock sync.RWMutex // Protects stats access. + + ops struct { + writeAt func(b []byte, off int64) (n int, err error) + } + + // Read only mode. + // When true, Update() and Begin(true) return ErrDatabaseReadOnly immediately. + readOnly bool +} + +// Path returns the path to currently open database file. +func (db *DB) Path() string { + return db.path +} + +// GoString returns the Go string representation of the database. +func (db *DB) GoString() string { + return fmt.Sprintf("bolt.DB{path:%q}", db.path) +} + +// String returns the string representation of the database. +func (db *DB) String() string { + return fmt.Sprintf("DB<%q>", db.path) +} + +// Open creates and opens a database at the given path. +// If the file does not exist then it will be created automatically. +// Passing in nil options will cause Bolt to open the database with the default options. +func Open(path string, mode os.FileMode, options *Options) (*DB, error) { + var db = &DB{opened: true} + + // Set default options if no options are provided. + if options == nil { + options = DefaultOptions + } + db.NoGrowSync = options.NoGrowSync + db.MmapFlags = options.MmapFlags + + // Set default values for later DB operations. + db.MaxBatchSize = DefaultMaxBatchSize + db.MaxBatchDelay = DefaultMaxBatchDelay + db.AllocSize = DefaultAllocSize + + flag := os.O_RDWR + if options.ReadOnly { + flag = os.O_RDONLY + db.readOnly = true + } + + // Open data file and separate sync handler for metadata writes. + db.path = path + var err error + if db.file, err = os.OpenFile(db.path, flag|os.O_CREATE, mode); err != nil { + _ = db.close() + return nil, err + } + + // Lock file so that other processes using Bolt in read-write mode cannot + // use the database at the same time. This would cause corruption since + // the two processes would write meta pages and free pages separately. + // The database file is locked exclusively (only one process can grab the lock) + // if !options.ReadOnly. + // The database file is locked using the shared lock (more than one process may + // hold a lock at the same time) otherwise (options.ReadOnly is set). + if err := flock(db, mode, !db.readOnly, options.Timeout); err != nil { + _ = db.close() + return nil, err + } + + // Default values for test hooks + db.ops.writeAt = db.file.WriteAt + + // Initialize the database if it doesn't exist. 
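+	// A zero-length file has never been initialized, so fresh meta pages
+	// are written; otherwise the page size recorded in the first meta page
+	// is adopted, since it may differ from this host's os.Getpagesize().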
+	if info, err := db.file.Stat(); err != nil {
+		return nil, err
+	} else if info.Size() == 0 {
+		// Initialize new files with meta pages.
+		if err := db.init(); err != nil {
+			return nil, err
+		}
+	} else {
+		// Read the first meta page to determine the page size.
+		var buf [0x1000]byte
+		if _, err := db.file.ReadAt(buf[:], 0); err == nil {
+			m := db.pageInBuffer(buf[:], 0).meta()
+			if err := m.validate(); err != nil {
+				return nil, err
+			}
+			db.pageSize = int(m.pageSize)
+		}
+	}
+
+	// Memory map the data file.
+	if err := db.mmap(options.InitialMmapSize); err != nil {
+		_ = db.close()
+		return nil, err
+	}
+
+	// Read in the freelist.
+	db.freelist = newFreelist()
+	db.freelist.read(db.page(db.meta().freelist))
+
+	// Mark the database as opened and return.
+	return db, nil
+}
+
+// mmap opens the underlying memory-mapped file and initializes the meta references.
+// minsz is the minimum size that the new mmap can be.
+func (db *DB) mmap(minsz int) error {
+	db.mmaplock.Lock()
+	defer db.mmaplock.Unlock()
+
+	info, err := db.file.Stat()
+	if err != nil {
+		return fmt.Errorf("mmap stat error: %s", err)
+	} else if int(info.Size()) < db.pageSize*2 {
+		return fmt.Errorf("file size too small")
+	}
+
+	// Ensure the size is at least the minimum size.
+	var size = int(info.Size())
+	if size < minsz {
+		size = minsz
+	}
+	size, err = db.mmapSize(size)
+	if err != nil {
+		return err
+	}
+
+	// Dereference all mmap references before unmapping.
+	if db.rwtx != nil {
+		db.rwtx.root.dereference()
+	}
+
+	// Unmap existing data before continuing.
+	if err := db.munmap(); err != nil {
+		return err
+	}
+
+	// Memory-map the data file as a byte slice.
+	if err := mmap(db, size); err != nil {
+		return err
+	}
+
+	// Save references to the meta pages.
+	db.meta0 = db.page(0).meta()
+	db.meta1 = db.page(1).meta()
+
+	// Validate the meta pages.
+	if err := db.meta0.validate(); err != nil {
+		return err
+	}
+	if err := db.meta1.validate(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// munmap unmaps the data file from memory.
+func (db *DB) munmap() error {
+	if err := munmap(db); err != nil {
+		return fmt.Errorf("unmap error: " + err.Error())
+	}
+	return nil
+}
+
+// mmapSize determines the appropriate size for the mmap given the current size
+// of the database. The minimum size is 32KB and doubles until it reaches 1GB.
+// Returns an error if the new mmap size is greater than the max allowed.
+func (db *DB) mmapSize(size int) (int, error) {
+	// Double the size from 32KB until 1GB.
+	for i := uint(15); i <= 30; i++ {
+		if size <= 1<<i {
+			return 1 << i, nil
+		}
+	}
+
+	// Verify the requested size is not above the maximum allowed.
+	if size > maxMapSize {
+		return 0, fmt.Errorf("mmap too large")
+	}
+
+	// If larger than 1GB then grow by 1GB at a time.
+	sz := int64(size)
+	if remainder := sz % int64(maxMmapStep); remainder > 0 {
+		sz += int64(maxMmapStep) - remainder
+	}
+
+	// Ensure that the mmap size is a multiple of the page size.
+	// This should always be true since we're incrementing in MBs.
+	pageSize := int64(db.pageSize)
+	if (sz % pageSize) != 0 {
+		sz = ((sz / pageSize) + 1) * pageSize
+	}
+
+	// If we've exceeded the max size then only grow up to the max size.
+	if sz > maxMapSize {
+		sz = maxMapSize
+	}
+
+	return int(sz), nil
+}
+
+// init creates a new database file and initializes its meta pages.
+func (db *DB) init() error {
+	// Set the page size to the OS page size.
+	db.pageSize = os.Getpagesize()
+
+	// Create two meta pages on a buffer.
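+	// A fresh database is exactly four pages: pages 0 and 1 are the two
+	// meta pages, page 2 holds the empty freelist, and page 3 is the empty
+	// leaf backing the root bucket (hence root: 3 and pgid: 4 below).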
+ buf := make([]byte, db.pageSize*4) + for i := 0; i < 2; i++ { + p := db.pageInBuffer(buf[:], pgid(i)) + p.id = pgid(i) + p.flags = metaPageFlag + + // Initialize the meta page. + m := p.meta() + m.magic = magic + m.version = version + m.pageSize = uint32(db.pageSize) + m.freelist = 2 + m.root = bucket{root: 3} + m.pgid = 4 + m.txid = txid(i) + } + + // Write an empty freelist at page 3. + p := db.pageInBuffer(buf[:], pgid(2)) + p.id = pgid(2) + p.flags = freelistPageFlag + p.count = 0 + + // Write an empty leaf page at page 4. + p = db.pageInBuffer(buf[:], pgid(3)) + p.id = pgid(3) + p.flags = leafPageFlag + p.count = 0 + + // Write the buffer to our data file. + if _, err := db.ops.writeAt(buf, 0); err != nil { + return err + } + if err := fdatasync(db); err != nil { + return err + } + + return nil +} + +// Close releases all database resources. +// All transactions must be closed before closing the database. +func (db *DB) Close() error { + db.rwlock.Lock() + defer db.rwlock.Unlock() + + db.metalock.Lock() + defer db.metalock.Unlock() + + db.mmaplock.RLock() + defer db.mmaplock.RUnlock() + + return db.close() +} + +func (db *DB) close() error { + if !db.opened { + return nil + } + + db.opened = false + + db.freelist = nil + db.path = "" + + // Clear ops. + db.ops.writeAt = nil + + // Close the mmap. + if err := db.munmap(); err != nil { + return err + } + + // Close file handles. + if db.file != nil { + // No need to unlock read-only file. + if !db.readOnly { + // Unlock the file. + if err := funlock(db); err != nil { + log.Printf("bolt.Close(): funlock error: %s", err) + } + } + + // Close the file descriptor. + if err := db.file.Close(); err != nil { + return fmt.Errorf("db file close: %s", err) + } + db.file = nil + } + + return nil +} + +// Begin starts a new transaction. +// Multiple read-only transactions can be used concurrently but only one +// write transaction can be used at a time. Starting multiple write transactions +// will cause the calls to block and be serialized until the current write +// transaction finishes. +// +// Transactions should not be dependent on one another. Opening a read +// transaction and a write transaction in the same goroutine can cause the +// writer to deadlock because the database periodically needs to re-mmap itself +// as it grows and it cannot do that while a read transaction is open. +// +// If a long running read transaction (for example, a snapshot transaction) is +// needed, you might want to set DB.InitialMmapSize to a large enough value +// to avoid potential blocking of write transaction. +// +// IMPORTANT: You must close read-only transactions after you are finished or +// else the database will not reclaim old pages. +func (db *DB) Begin(writable bool) (*Tx, error) { + if writable { + return db.beginRWTx() + } + return db.beginTx() +} + +func (db *DB) beginTx() (*Tx, error) { + // Lock the meta pages while we initialize the transaction. We obtain + // the meta lock before the mmap lock because that's the order that the + // write transaction will obtain them. + db.metalock.Lock() + + // Obtain a read-only lock on the mmap. When the mmap is remapped it will + // obtain a write lock so all transactions must finish before it can be + // remapped. + db.mmaplock.RLock() + + // Exit if the database is not open yet. + if !db.opened { + db.mmaplock.RUnlock() + db.metalock.Unlock() + return nil, ErrDatabaseNotOpen + } + + // Create a transaction associated with the database. 
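+	// The Tx copies the current meta page on init; registering it in
+	// db.txs below is what keeps the next writer from releasing pages this
+	// reader can still see (see the freelist release in beginRWTx).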
+ t := &Tx{} + t.init(db) + + // Keep track of transaction until it closes. + db.txs = append(db.txs, t) + n := len(db.txs) + + // Unlock the meta pages. + db.metalock.Unlock() + + // Update the transaction stats. + db.statlock.Lock() + db.stats.TxN++ + db.stats.OpenTxN = n + db.statlock.Unlock() + + return t, nil +} + +func (db *DB) beginRWTx() (*Tx, error) { + // If the database was opened with Options.ReadOnly, return an error. + if db.readOnly { + return nil, ErrDatabaseReadOnly + } + + // Obtain writer lock. This is released by the transaction when it closes. + // This enforces only one writer transaction at a time. + db.rwlock.Lock() + + // Once we have the writer lock then we can lock the meta pages so that + // we can set up the transaction. + db.metalock.Lock() + defer db.metalock.Unlock() + + // Exit if the database is not open yet. + if !db.opened { + db.rwlock.Unlock() + return nil, ErrDatabaseNotOpen + } + + // Create a transaction associated with the database. + t := &Tx{writable: true} + t.init(db) + db.rwtx = t + + // Free any pages associated with closed read-only transactions. + var minid txid = 0xFFFFFFFFFFFFFFFF + for _, t := range db.txs { + if t.meta.txid < minid { + minid = t.meta.txid + } + } + if minid > 0 { + db.freelist.release(minid - 1) + } + + return t, nil +} + +// removeTx removes a transaction from the database. +func (db *DB) removeTx(tx *Tx) { + // Release the read lock on the mmap. + db.mmaplock.RUnlock() + + // Use the meta lock to restrict access to the DB object. + db.metalock.Lock() + + // Remove the transaction. + for i, t := range db.txs { + if t == tx { + db.txs = append(db.txs[:i], db.txs[i+1:]...) + break + } + } + n := len(db.txs) + + // Unlock the meta pages. + db.metalock.Unlock() + + // Merge statistics. + db.statlock.Lock() + db.stats.OpenTxN = n + db.stats.TxStats.add(&tx.stats) + db.statlock.Unlock() +} + +// Update executes a function within the context of a read-write managed transaction. +// If no error is returned from the function then the transaction is committed. +// If an error is returned then the entire transaction is rolled back. +// Any error that is returned from the function or returned from the commit is +// returned from the Update() method. +// +// Attempting to manually commit or rollback within the function will cause a panic. +func (db *DB) Update(fn func(*Tx) error) error { + t, err := db.Begin(true) + if err != nil { + return err + } + + // Make sure the transaction rolls back in the event of a panic. + defer func() { + if t.db != nil { + t.rollback() + } + }() + + // Mark as a managed tx so that the inner function cannot manually commit. + t.managed = true + + // If an error is returned from the function then rollback and return error. + err = fn(t) + t.managed = false + if err != nil { + _ = t.Rollback() + return err + } + + return t.Commit() +} + +// View executes a function within the context of a managed read-only transaction. +// Any error that is returned from the function is returned from the View() method. +// +// Attempting to manually rollback within the function will cause a panic. +func (db *DB) View(fn func(*Tx) error) error { + t, err := db.Begin(false) + if err != nil { + return err + } + + // Make sure the transaction rolls back in the event of a panic. + defer func() { + if t.db != nil { + t.rollback() + } + }() + + // Mark as a managed tx so that the inner function cannot manually rollback. + t.managed = true + + // If an error is returned from the function then pass it through. 
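+	// Note that a read-only transaction always ends in Rollback, even on
+	// success; Commit is reserved for writable transactions.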
+ err = fn(t) + t.managed = false + if err != nil { + _ = t.Rollback() + return err + } + + if err := t.Rollback(); err != nil { + return err + } + + return nil +} + +// Batch calls fn as part of a batch. It behaves similar to Update, +// except: +// +// 1. concurrent Batch calls can be combined into a single Bolt +// transaction. +// +// 2. the function passed to Batch may be called multiple times, +// regardless of whether it returns error or not. +// +// This means that Batch function side effects must be idempotent and +// take permanent effect only after a successful return is seen in +// caller. +// +// The maximum batch size and delay can be adjusted with DB.MaxBatchSize +// and DB.MaxBatchDelay, respectively. +// +// Batch is only useful when there are multiple goroutines calling it. +func (db *DB) Batch(fn func(*Tx) error) error { + errCh := make(chan error, 1) + + db.batchMu.Lock() + if (db.batch == nil) || (db.batch != nil && len(db.batch.calls) >= db.MaxBatchSize) { + // There is no existing batch, or the existing batch is full; start a new one. + db.batch = &batch{ + db: db, + } + db.batch.timer = time.AfterFunc(db.MaxBatchDelay, db.batch.trigger) + } + db.batch.calls = append(db.batch.calls, call{fn: fn, err: errCh}) + if len(db.batch.calls) >= db.MaxBatchSize { + // wake up batch, it's ready to run + go db.batch.trigger() + } + db.batchMu.Unlock() + + err := <-errCh + if err == trySolo { + err = db.Update(fn) + } + return err +} + +type call struct { + fn func(*Tx) error + err chan<- error +} + +type batch struct { + db *DB + timer *time.Timer + start sync.Once + calls []call +} + +// trigger runs the batch if it hasn't already been run. +func (b *batch) trigger() { + b.start.Do(b.run) +} + +// run performs the transactions in the batch and communicates results +// back to DB.Batch. +func (b *batch) run() { + b.db.batchMu.Lock() + b.timer.Stop() + // Make sure no new work is added to this batch, but don't break + // other batches. + if b.db.batch == b { + b.db.batch = nil + } + b.db.batchMu.Unlock() + +retry: + for len(b.calls) > 0 { + var failIdx = -1 + err := b.db.Update(func(tx *Tx) error { + for i, c := range b.calls { + if err := safelyCall(c.fn, tx); err != nil { + failIdx = i + return err + } + } + return nil + }) + + if failIdx >= 0 { + // take the failing transaction out of the batch. it's + // safe to shorten b.calls here because db.batch no longer + // points to us, and we hold the mutex anyway. + c := b.calls[failIdx] + b.calls[failIdx], b.calls = b.calls[len(b.calls)-1], b.calls[:len(b.calls)-1] + // tell the submitter re-run it solo, continue with the rest of the batch + c.err <- trySolo + continue retry + } + + // pass success, or bolt internal errors, to all callers + for _, c := range b.calls { + if c.err != nil { + c.err <- err + } + } + break retry + } +} + +// trySolo is a special sentinel error value used for signaling that a +// transaction function should be re-run. It should never be seen by +// callers. +var trySolo = errors.New("batch function returned an error and should be re-run solo") + +type panicked struct { + reason interface{} +} + +func (p panicked) Error() string { + if err, ok := p.reason.(error); ok { + return err.Error() + } + return fmt.Sprintf("panic: %v", p.reason) +} + +func safelyCall(fn func(*Tx) error, tx *Tx) (err error) { + defer func() { + if p := recover(); p != nil { + err = panicked{p} + } + }() + return fn(tx) +} + +// Sync executes fdatasync() against the database file handle. 
+//
+// This is not necessary under normal operation; however, if you use NoSync,
+// it allows you to force the database file to sync against the disk.
+func (db *DB) Sync() error { return fdatasync(db) }
+
+// Stats retrieves ongoing performance stats for the database.
+// This is only updated when a transaction closes.
+func (db *DB) Stats() Stats {
+	db.statlock.RLock()
+	defer db.statlock.RUnlock()
+	return db.stats
+}
+
+// Info is for internal access to the raw data bytes of the memory map,
+// e.g. from an external (C) cursor. Use carefully, or not at all.
+func (db *DB) Info() *Info {
+	return &Info{uintptr(unsafe.Pointer(&db.data[0])), db.pageSize}
+}
+
+// page retrieves a page reference from the mmap based on the current page size.
+func (db *DB) page(id pgid) *page {
+	pos := id * pgid(db.pageSize)
+	return (*page)(unsafe.Pointer(&db.data[pos]))
+}
+
+// pageInBuffer retrieves a page reference from a given byte array based on the current page size.
+func (db *DB) pageInBuffer(b []byte, id pgid) *page {
+	return (*page)(unsafe.Pointer(&b[id*pgid(db.pageSize)]))
+}
+
+// meta retrieves the current meta page reference.
+func (db *DB) meta() *meta {
+	if db.meta0.txid > db.meta1.txid {
+		return db.meta0
+	}
+	return db.meta1
+}
+
+// allocate returns a contiguous block of memory starting at a given page.
+func (db *DB) allocate(count int) (*page, error) {
+	// Allocate a temporary buffer for the page.
+	buf := make([]byte, count*db.pageSize)
+	p := (*page)(unsafe.Pointer(&buf[0]))
+	p.overflow = uint32(count - 1)
+
+	// Use pages from the freelist if they are available.
+	if p.id = db.freelist.allocate(count); p.id != 0 {
+		return p, nil
+	}
+
+	// Resize mmap() if we're at the end.
+	p.id = db.rwtx.meta.pgid
+	var minsz = int((p.id+pgid(count))+1) * db.pageSize
+	if minsz >= db.datasz {
+		if err := db.mmap(minsz); err != nil {
+			return nil, fmt.Errorf("mmap allocate error: %s", err)
+		}
+	}
+
+	// Move the page id high water mark.
+	db.rwtx.meta.pgid += pgid(count)
+
+	return p, nil
+}
+
+// grow grows the size of the database to the given sz.
+func (db *DB) grow(sz int) error {
+	// Ignore if the new size is less than the available file size.
+	if sz <= db.filesz {
+		return nil
+	}
+
+	// If the data is smaller than the alloc size then only allocate what's needed.
+	// Once it goes over the allocation size then allocate in chunks.
+	if db.datasz < db.AllocSize {
+		sz = db.datasz
+	} else {
+		sz += db.AllocSize
+	}
+
+	// Truncate and fsync to ensure file size metadata is flushed.
+	// https://github.com/boltdb/bolt/issues/284
+	if !db.NoGrowSync && !db.readOnly {
+		if runtime.GOOS != "windows" {
+			if err := db.file.Truncate(int64(sz)); err != nil {
+				return fmt.Errorf("file resize error: %s", err)
+			}
+		}
+		if err := db.file.Sync(); err != nil {
+			return fmt.Errorf("file sync error: %s", err)
+		}
+	}
+
+	db.filesz = sz
+	return nil
+}
+
+// IsReadOnly returns whether the database was opened with Options.ReadOnly.
+func (db *DB) IsReadOnly() bool {
+	return db.readOnly
+}
+
+// Options represents the options that can be set when opening a database.
+type Options struct {
+	// Timeout is the amount of time to wait to obtain a file lock.
+	// When set to zero it will wait indefinitely. This option is only
+	// available on Darwin and Linux.
+	Timeout time.Duration
+
+	// Sets the DB.NoGrowSync flag before memory mapping the file.
+	NoGrowSync bool
+
+	// Open database in read-only mode. Uses flock(..., LOCK_SH | LOCK_NB) to
+	// grab a shared lock (UNIX).
+	ReadOnly bool
+
+	// Sets the DB.MmapFlags flag before memory mapping the file.
+	MmapFlags int
+
+	// InitialMmapSize is the initial mmap size of the database
+	// in bytes. Read transactions won't block write transactions
+	// if InitialMmapSize is large enough to hold the database mmap
+	// size. (See DB.Begin for more information.)
+	//
+	// If <=0, the initial map size is 0.
+	// If InitialMmapSize is smaller than the previous database size,
+	// it has no effect.
+	InitialMmapSize int
+}
+
+// DefaultOptions represents the options used if nil options are passed into Open().
+// No timeout is used, which will cause Bolt to wait indefinitely for a lock.
+var DefaultOptions = &Options{
+	Timeout:    0,
+	NoGrowSync: false,
+}
+
+// Stats represents statistics about the database.
+type Stats struct {
+	// Freelist stats
+	FreePageN     int // total number of free pages on the freelist
+	PendingPageN  int // total number of pending pages on the freelist
+	FreeAlloc     int // total bytes allocated in free pages
+	FreelistInuse int // total bytes used by the freelist
+
+	// Transaction stats
+	TxN     int // total number of started read transactions
+	OpenTxN int // number of currently open read transactions
+
+	TxStats TxStats // global, ongoing stats.
+}
+
+// Sub calculates and returns the difference between two sets of database stats.
+// This is useful when obtaining stats at two different points in time and
+// you need the performance counters that occurred within that time span.
+func (s *Stats) Sub(other *Stats) Stats {
+	if other == nil {
+		return *s
+	}
+	var diff Stats
+	diff.FreePageN = s.FreePageN
+	diff.PendingPageN = s.PendingPageN
+	diff.FreeAlloc = s.FreeAlloc
+	diff.FreelistInuse = s.FreelistInuse
+	diff.TxN = other.TxN - s.TxN
+	diff.TxStats = s.TxStats.Sub(&other.TxStats)
+	return diff
+}
+
+func (s *Stats) add(other *Stats) {
+	s.TxStats.add(&other.TxStats)
+}
+
+type Info struct {
+	Data     uintptr
+	PageSize int
+}
+
+type meta struct {
+	magic    uint32
+	version  uint32
+	pageSize uint32
+	flags    uint32
+	root     bucket
+	freelist pgid
+	pgid     pgid
+	txid     txid
+	checksum uint64
+}
+
+// validate checks the marker bytes and version of the meta page to ensure it matches this binary.
+func (m *meta) validate() error {
+	if m.checksum != 0 && m.checksum != m.sum64() {
+		return ErrChecksum
+	} else if m.magic != magic {
+		return ErrInvalid
+	} else if m.version != version {
+		return ErrVersionMismatch
+	}
+	return nil
+}
+
+// copy copies one meta object to another.
+func (m *meta) copy(dest *meta) {
+	*dest = *m
+}
+
+// write writes the meta onto a page.
+func (m *meta) write(p *page) {
+	if m.root.root >= m.pgid {
+		panic(fmt.Sprintf("root bucket pgid (%d) above high water mark (%d)", m.root.root, m.pgid))
+	} else if m.freelist >= m.pgid {
+		panic(fmt.Sprintf("freelist pgid (%d) above high water mark (%d)", m.freelist, m.pgid))
+	}
+
+	// Page id is either going to be 0 or 1 which we can determine by the transaction ID.
+	p.id = pgid(m.txid % 2)
+	p.flags |= metaPageFlag
+
+	// Calculate the checksum.
+	m.checksum = m.sum64()
+
+	m.copy(p.meta())
+}
+
+// sum64 generates the checksum for the meta.
+func (m *meta) sum64() uint64 {
+	var h = fnv.New64a()
+	_, _ = h.Write((*[unsafe.Offsetof(meta{}.checksum)]byte)(unsafe.Pointer(m))[:])
+	return h.Sum64()
+}
+
+// _assert will panic with a given formatted message if the given condition is false.
+func _assert(condition bool, msg string, v ...interface{}) {
+	if !condition {
+		panic(fmt.Sprintf("assertion failed: "+msg, v...))
+	}
+}
+
+func warn(v ...interface{}) { fmt.Fprintln(os.Stderr, v...) }
+func warnf(msg string, v ...interface{}) { fmt.Fprintf(os.Stderr, msg+"\n", v...) }
+
+func printstack() {
+	stack := strings.Join(strings.Split(string(debug.Stack()), "\n")[2:], "\n")
+	fmt.Fprintln(os.Stderr, stack)
+}
diff --git a/vendor/github.com/boltdb/bolt/doc.go b/vendor/github.com/boltdb/bolt/doc.go
new file mode 100644
index 00000000..cc937845
--- /dev/null
+++ b/vendor/github.com/boltdb/bolt/doc.go
@@ -0,0 +1,44 @@
+/*
+Package bolt implements a low-level key/value store in pure Go. It supports
+fully serializable transactions, ACID semantics, and lock-free MVCC with
+multiple readers and a single writer. Bolt can be used for projects that
+want a simple data store without the need to add large dependencies such as
+Postgres or MySQL.
+
+Bolt is a single-level, zero-copy, B+tree data store. This means that Bolt is
+optimized for fast read access and does not require recovery in the event of a
+system crash. Transactions that have not finished committing will simply be
+rolled back in the event of a crash.
+
+The design of Bolt is based on Howard Chu's LMDB database project.
+
+Bolt currently works on Windows, Mac OS X, and Linux.
+
+
+Basics
+
+There are only a few types in Bolt: DB, Bucket, Tx, and Cursor. The DB is
+a collection of buckets and is represented by a single file on disk. A bucket is
+a collection of unique keys that are associated with values.
+
+Transactions provide either read-only or read-write access to the database.
+Read-only transactions can retrieve key/value pairs and can use Cursors to
+iterate over the dataset sequentially. Read-write transactions can create and
+delete buckets and can insert and remove keys. Only one read-write transaction
+is allowed at a time.
+
+
+Caveats
+
+The database uses a read-only, memory-mapped data file to ensure that
+applications cannot corrupt the database; however, this means that keys and
+values returned from Bolt cannot be changed. Writing to a read-only byte slice
+will cause Go to panic.
+
+Keys and values retrieved from the database are only valid for the life of
+the transaction. When used outside the transaction, these byte slices can
+point to different data or can point to invalid memory, which will cause a panic.
+
+
+*/
+package bolt
diff --git a/vendor/github.com/boltdb/bolt/errors.go b/vendor/github.com/boltdb/bolt/errors.go
new file mode 100644
index 00000000..6883786d
--- /dev/null
+++ b/vendor/github.com/boltdb/bolt/errors.go
@@ -0,0 +1,70 @@
+package bolt
+
+import "errors"
+
+// These errors can be returned when opening or calling methods on a DB.
+var (
+	// ErrDatabaseNotOpen is returned when a DB instance is accessed before it
+	// is opened or after it is closed.
+	ErrDatabaseNotOpen = errors.New("database not open")
+
+	// ErrDatabaseOpen is returned when opening a database that is
+	// already open.
+	ErrDatabaseOpen = errors.New("database already open")
+
+	// ErrInvalid is returned when a data file is not a Bolt-formatted database.
+	ErrInvalid = errors.New("invalid database")
+
+	// ErrVersionMismatch is returned when the data file was created with a
+	// different version of Bolt.
+	ErrVersionMismatch = errors.New("version mismatch")
+
+	// ErrChecksum is returned when either meta page's checksum does not match.
+	ErrChecksum = errors.New("checksum error")
+
+	// ErrTimeout is returned when a database cannot obtain an exclusive lock
+	// on the data file after the timeout passed to Open().
+	ErrTimeout = errors.New("timeout")
+)
+
+// These errors can occur when beginning or committing a Tx.
+var (
+	// ErrTxNotWritable is returned when performing a write operation on a
+	// read-only transaction.
+	ErrTxNotWritable = errors.New("tx not writable")
+
+	// ErrTxClosed is returned when committing or rolling back a transaction
+	// that has already been committed or rolled back.
+	ErrTxClosed = errors.New("tx closed")
+
+	// ErrDatabaseReadOnly is returned when a mutating transaction is started on a
+	// read-only database.
+	ErrDatabaseReadOnly = errors.New("database is in read-only mode")
+)
+
+// These errors can occur when putting or deleting a value or a bucket.
+var (
+	// ErrBucketNotFound is returned when trying to access a bucket that has
+	// not been created yet.
+	ErrBucketNotFound = errors.New("bucket not found")
+
+	// ErrBucketExists is returned when creating a bucket that already exists.
+	ErrBucketExists = errors.New("bucket already exists")
+
+	// ErrBucketNameRequired is returned when creating a bucket with a blank name.
+	ErrBucketNameRequired = errors.New("bucket name required")
+
+	// ErrKeyRequired is returned when inserting a zero-length key.
+	ErrKeyRequired = errors.New("key required")
+
+	// ErrKeyTooLarge is returned when inserting a key that is larger than MaxKeySize.
+	ErrKeyTooLarge = errors.New("key too large")
+
+	// ErrValueTooLarge is returned when inserting a value that is larger than MaxValueSize.
+	ErrValueTooLarge = errors.New("value too large")
+
+	// ErrIncompatibleValue is returned when trying to create or delete a bucket
+	// on an existing non-bucket key or when trying to create or delete a
+	// non-bucket key on an existing bucket key.
+	ErrIncompatibleValue = errors.New("incompatible value")
+)
diff --git a/vendor/github.com/boltdb/bolt/freelist.go b/vendor/github.com/boltdb/bolt/freelist.go
new file mode 100644
index 00000000..0161948f
--- /dev/null
+++ b/vendor/github.com/boltdb/bolt/freelist.go
@@ -0,0 +1,242 @@
+package bolt
+
+import (
+	"fmt"
+	"sort"
+	"unsafe"
+)
+
+// freelist represents a list of all pages that are available for allocation.
+// It also tracks pages that have been freed but are still in use by open transactions.
+type freelist struct {
+	ids     []pgid          // all free and available free page ids.
+	pending map[txid][]pgid // mapping of soon-to-be free page ids by tx.
+	cache   map[pgid]bool   // fast lookup of all free and pending page ids.
+}
+
+// newFreelist returns an empty, initialized freelist.
+func newFreelist() *freelist {
+	return &freelist{
+		pending: make(map[txid][]pgid),
+		cache:   make(map[pgid]bool),
+	}
+}
+
+// size returns the size of the page after serialization.
+func (f *freelist) size() int {
+	return pageHeaderSize + (int(unsafe.Sizeof(pgid(0))) * f.count())
+}
+
+// count returns the count of pages on the freelist.
+func (f *freelist) count() int {
+	return f.free_count() + f.pending_count()
+}
+
+// free_count returns the count of free pages.
+func (f *freelist) free_count() int {
+	return len(f.ids)
+}
+
+// pending_count returns the count of pending pages.
+func (f *freelist) pending_count() int {
+	var count int
+	for _, list := range f.pending {
+		count += len(list)
+	}
+	return count
+}
+
+// all returns a list of all free ids and all pending ids in one sorted list.
+func (f *freelist) all() []pgid {
+	m := make(pgids, 0)
+
+	for _, list := range f.pending {
+		m = append(m, list...)
+	}
+
+	sort.Sort(m)
+	return pgids(f.ids).merge(m)
+}
+
+// allocate returns the starting page id of a contiguous list of pages of a given size.
+// If a contiguous block cannot be found then 0 is returned.
+func (f *freelist) allocate(n int) pgid {
+	if len(f.ids) == 0 {
+		return 0
+	}
+
+	var initial, previd pgid
+	for i, id := range f.ids {
+		if id <= 1 {
+			panic(fmt.Sprintf("invalid page allocation: %d", id))
+		}
+
+		// Reset initial page if this is not contiguous.
+		if previd == 0 || id-previd != 1 {
+			initial = id
+		}
+
+		// If we found a contiguous block then remove it and return it.
+		if (id-initial)+1 == pgid(n) {
+			// If we're allocating off the beginning then take the fast path
+			// and just adjust the existing slice. This will use extra memory
+			// temporarily but the append() in free() will realloc the slice
+			// as necessary.
+			if (i + 1) == n {
+				f.ids = f.ids[i+1:]
+			} else {
+				copy(f.ids[i-n+1:], f.ids[i+1:])
+				f.ids = f.ids[:len(f.ids)-n]
+			}
+
+			// Remove from the free cache.
+			for i := pgid(0); i < pgid(n); i++ {
+				delete(f.cache, initial+i)
+			}
+
+			return initial
+		}
+
+		previd = id
+	}
+	return 0
+}
+
+// free releases a page and its overflow for a given transaction id.
+// If the page is already free then a panic will occur.
+func (f *freelist) free(txid txid, p *page) {
+	if p.id <= 1 {
+		panic(fmt.Sprintf("cannot free page 0 or 1: %d", p.id))
+	}
+
+	// Free page and all its overflow pages.
+	var ids = f.pending[txid]
+	for id := p.id; id <= p.id+pgid(p.overflow); id++ {
+		// Verify that page is not already free.
+		if f.cache[id] {
+			panic(fmt.Sprintf("page %d already freed", id))
+		}
+
+		// Add to the freelist and cache.
+		ids = append(ids, id)
+		f.cache[id] = true
+	}
+	f.pending[txid] = ids
+}
+
+// release moves all page ids for a transaction id (or older) to the freelist.
+func (f *freelist) release(txid txid) {
+	m := make(pgids, 0)
+	for tid, ids := range f.pending {
+		if tid <= txid {
+			// Move transaction's pending pages to the available freelist.
+			// Don't remove from the cache since the page is still free.
+			m = append(m, ids...)
+			delete(f.pending, tid)
+		}
+	}
+	sort.Sort(m)
+	f.ids = pgids(f.ids).merge(m)
+}
+
+// rollback removes the pages from a given pending tx.
+func (f *freelist) rollback(txid txid) {
+	// Remove page ids from cache.
+	for _, id := range f.pending[txid] {
+		delete(f.cache, id)
+	}
+
+	// Remove pages from pending list.
+	delete(f.pending, txid)
+}
+
+// freed returns whether a given page is in the free list.
+func (f *freelist) freed(pgid pgid) bool {
+	return f.cache[pgid]
+}
+
+// read initializes the freelist from a freelist page.
+func (f *freelist) read(p *page) {
+	// If the page.count is at the max uint16 value (64k) then it's considered
+	// an overflow and the size of the freelist is stored as the first element.
+	idx, count := 0, int(p.count)
+	if count == 0xFFFF {
+		idx = 1
+		count = int(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0])
+	}
+
+	// Copy the list of page ids from the freelist.
+	ids := ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[idx:count]
+	f.ids = make([]pgid, len(ids))
+	copy(f.ids, ids)
+
+	// Make sure they're sorted.
+	sort.Sort(pgids(f.ids))
+
+	// Rebuild the page cache.
+	f.reindex()
+}
+
+// write writes the page ids onto a freelist page. All free and pending ids are
+// saved to disk since in the event of a program crash, all pending ids will
+// become free.
+func (f *freelist) write(p *page) error {
+	// Combine the old free pgids and pgids waiting on an open transaction.
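+	// For example, a freelist of 70,000 ids cannot be represented in the
+	// 16-bit page count field, so the branch below writes count=0xFFFF and
+	// stores the true length (70,000) in the first pgid-sized slot instead.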
+ ids := f.all() + + // Update the header flag. + p.flags |= freelistPageFlag + + // The page.count can only hold up to 64k elements so if we overflow that + // number then we handle it by putting the size in the first element. + if len(ids) < 0xFFFF { + p.count = uint16(len(ids)) + copy(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[:], ids) + } else { + p.count = 0xFFFF + ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0] = pgid(len(ids)) + copy(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[1:], ids) + } + + return nil +} + +// reload reads the freelist from a page and filters out pending items. +func (f *freelist) reload(p *page) { + f.read(p) + + // Build a cache of only pending pages. + pcache := make(map[pgid]bool) + for _, pendingIDs := range f.pending { + for _, pendingID := range pendingIDs { + pcache[pendingID] = true + } + } + + // Check each page in the freelist and build a new available freelist + // with any pages not in the pending lists. + var a []pgid + for _, id := range f.ids { + if !pcache[id] { + a = append(a, id) + } + } + f.ids = a + + // Once the available list is rebuilt then rebuild the free cache so that + // it includes the available and pending free pages. + f.reindex() +} + +// reindex rebuilds the free cache based on available and pending free lists. +func (f *freelist) reindex() { + f.cache = make(map[pgid]bool) + for _, id := range f.ids { + f.cache[id] = true + } + for _, pendingIDs := range f.pending { + for _, pendingID := range pendingIDs { + f.cache[pendingID] = true + } + } +} diff --git a/vendor/github.com/boltdb/bolt/node.go b/vendor/github.com/boltdb/bolt/node.go new file mode 100644 index 00000000..e9d64af8 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/node.go @@ -0,0 +1,599 @@ +package bolt + +import ( + "bytes" + "fmt" + "sort" + "unsafe" +) + +// node represents an in-memory, deserialized page. +type node struct { + bucket *Bucket + isLeaf bool + unbalanced bool + spilled bool + key []byte + pgid pgid + parent *node + children nodes + inodes inodes +} + +// root returns the top-level node this node is attached to. +func (n *node) root() *node { + if n.parent == nil { + return n + } + return n.parent.root() +} + +// minKeys returns the minimum number of inodes this node should have. +func (n *node) minKeys() int { + if n.isLeaf { + return 1 + } + return 2 +} + +// size returns the size of the node after serialization. +func (n *node) size() int { + sz, elsz := pageHeaderSize, n.pageElementSize() + for i := 0; i < len(n.inodes); i++ { + item := &n.inodes[i] + sz += elsz + len(item.key) + len(item.value) + } + return sz +} + +// sizeLessThan returns true if the node is less than a given size. +// This is an optimization to avoid calculating a large node when we only need +// to know if it fits inside a certain page size. +func (n *node) sizeLessThan(v int) bool { + sz, elsz := pageHeaderSize, n.pageElementSize() + for i := 0; i < len(n.inodes); i++ { + item := &n.inodes[i] + sz += elsz + len(item.key) + len(item.value) + if sz >= v { + return false + } + } + return true +} + +// pageElementSize returns the size of each page element based on the type of node. +func (n *node) pageElementSize() int { + if n.isLeaf { + return leafPageElementSize + } + return branchPageElementSize +} + +// childAt returns the child node at a given index. 
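+// It panics if called on a leaf node, since leaf inodes hold values rather
+// than child page ids.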
+func (n *node) childAt(index int) *node { + if n.isLeaf { + panic(fmt.Sprintf("invalid childAt(%d) on a leaf node", index)) + } + return n.bucket.node(n.inodes[index].pgid, n) +} + +// childIndex returns the index of a given child node. +func (n *node) childIndex(child *node) int { + index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, child.key) != -1 }) + return index +} + +// numChildren returns the number of children. +func (n *node) numChildren() int { + return len(n.inodes) +} + +// nextSibling returns the next node with the same parent. +func (n *node) nextSibling() *node { + if n.parent == nil { + return nil + } + index := n.parent.childIndex(n) + if index >= n.parent.numChildren()-1 { + return nil + } + return n.parent.childAt(index + 1) +} + +// prevSibling returns the previous node with the same parent. +func (n *node) prevSibling() *node { + if n.parent == nil { + return nil + } + index := n.parent.childIndex(n) + if index == 0 { + return nil + } + return n.parent.childAt(index - 1) +} + +// put inserts a key/value. +func (n *node) put(oldKey, newKey, value []byte, pgid pgid, flags uint32) { + if pgid >= n.bucket.tx.meta.pgid { + panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", pgid, n.bucket.tx.meta.pgid)) + } else if len(oldKey) <= 0 { + panic("put: zero-length old key") + } else if len(newKey) <= 0 { + panic("put: zero-length new key") + } + + // Find insertion index. + index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, oldKey) != -1 }) + + // Add capacity and shift nodes if we don't have an exact match and need to insert. + exact := (len(n.inodes) > 0 && index < len(n.inodes) && bytes.Equal(n.inodes[index].key, oldKey)) + if !exact { + n.inodes = append(n.inodes, inode{}) + copy(n.inodes[index+1:], n.inodes[index:]) + } + + inode := &n.inodes[index] + inode.flags = flags + inode.key = newKey + inode.value = value + inode.pgid = pgid + _assert(len(inode.key) > 0, "put: zero-length inode key") +} + +// del removes a key from the node. +func (n *node) del(key []byte) { + // Find index of key. + index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, key) != -1 }) + + // Exit if the key isn't found. + if index >= len(n.inodes) || !bytes.Equal(n.inodes[index].key, key) { + return + } + + // Delete inode from the node. + n.inodes = append(n.inodes[:index], n.inodes[index+1:]...) + + // Mark the node as needing rebalancing. + n.unbalanced = true +} + +// read initializes the node from a page. +func (n *node) read(p *page) { + n.pgid = p.id + n.isLeaf = ((p.flags & leafPageFlag) != 0) + n.inodes = make(inodes, int(p.count)) + + for i := 0; i < int(p.count); i++ { + inode := &n.inodes[i] + if n.isLeaf { + elem := p.leafPageElement(uint16(i)) + inode.flags = elem.flags + inode.key = elem.key() + inode.value = elem.value() + } else { + elem := p.branchPageElement(uint16(i)) + inode.pgid = elem.pgid + inode.key = elem.key() + } + _assert(len(inode.key) > 0, "read: zero-length inode key") + } + + // Save first key so we can find the node in the parent when we spill. + if len(n.inodes) > 0 { + n.key = n.inodes[0].key + _assert(len(n.key) > 0, "read: zero-length node key") + } else { + n.key = nil + } +} + +// write writes the items onto one or more pages. +func (n *node) write(p *page) { + // Initialize page. 
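+	// Layout: the fixed-size element headers are written first, followed by
+	// the variable-length key/value data; b below starts just past the
+	// element header array, and each element's pos records the offset back
+	// from its header to its data.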
+	if n.isLeaf {
+		p.flags |= leafPageFlag
+	} else {
+		p.flags |= branchPageFlag
+	}
+
+	if len(n.inodes) >= 0xFFFF {
+		panic(fmt.Sprintf("inode overflow: %d (pgid=%d)", len(n.inodes), p.id))
+	}
+	p.count = uint16(len(n.inodes))
+
+	// Loop over each item and write it to the page.
+	b := (*[maxAllocSize]byte)(unsafe.Pointer(&p.ptr))[n.pageElementSize()*len(n.inodes):]
+	for i, item := range n.inodes {
+		_assert(len(item.key) > 0, "write: zero-length inode key")
+
+		// Write the page element.
+		if n.isLeaf {
+			elem := p.leafPageElement(uint16(i))
+			elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem)))
+			elem.flags = item.flags
+			elem.ksize = uint32(len(item.key))
+			elem.vsize = uint32(len(item.value))
+		} else {
+			elem := p.branchPageElement(uint16(i))
+			elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem)))
+			elem.ksize = uint32(len(item.key))
+			elem.pgid = item.pgid
+			_assert(elem.pgid != p.id, "write: circular dependency occurred")
+		}
+
+		// If the length of key+value is larger than the max allocation size
+		// then we need to reallocate the byte array pointer.
+		//
+		// See: https://github.com/boltdb/bolt/pull/335
+		klen, vlen := len(item.key), len(item.value)
+		if len(b) < klen+vlen {
+			b = (*[maxAllocSize]byte)(unsafe.Pointer(&b[0]))[:]
+		}
+
+		// Write data for the element to the end of the page.
+		copy(b[0:], item.key)
+		b = b[klen:]
+		copy(b[0:], item.value)
+		b = b[vlen:]
+	}
+
+	// DEBUG ONLY: n.dump()
+}
+
+// split breaks up a node into multiple smaller nodes, if appropriate.
+// This should only be called from the spill() function.
+func (n *node) split(pageSize int) []*node {
+	var nodes []*node
+
+	node := n
+	for {
+		// Split node into two.
+		a, b := node.splitTwo(pageSize)
+		nodes = append(nodes, a)
+
+		// If we can't split then exit the loop.
+		if b == nil {
+			break
+		}
+
+		// Set node to b so it gets split on the next iteration.
+		node = b
+	}
+
+	return nodes
+}
+
+// splitTwo breaks up a node into two smaller nodes, if appropriate.
+// This should only be called from the split() function.
+func (n *node) splitTwo(pageSize int) (*node, *node) {
+	// Ignore the split if the page doesn't have at least enough nodes for
+	// two pages or if the nodes can fit in a single page.
+	if len(n.inodes) <= (minKeysPerPage*2) || n.sizeLessThan(pageSize) {
+		return n, nil
+	}
+
+	// Determine the threshold before starting a new node.
+	var fillPercent = n.bucket.FillPercent
+	if fillPercent < minFillPercent {
+		fillPercent = minFillPercent
+	} else if fillPercent > maxFillPercent {
+		fillPercent = maxFillPercent
+	}
+	threshold := int(float64(pageSize) * fillPercent)
+
+	// Determine split position and sizes of the two pages.
+	splitIndex, _ := n.splitIndex(threshold)
+
+	// Split node into two separate nodes.
+	// If there's no parent then we'll need to create one.
+	if n.parent == nil {
+		n.parent = &node{bucket: n.bucket, children: []*node{n}}
+	}
+
+	// Create a new node and add it to the parent.
+	next := &node{bucket: n.bucket, isLeaf: n.isLeaf, parent: n.parent}
+	n.parent.children = append(n.parent.children, next)
+
+	// Split inodes across two nodes.
+	next.inodes = n.inodes[splitIndex:]
+	n.inodes = n.inodes[:splitIndex]
+
+	// Update the statistics.
+	n.bucket.tx.stats.Split++
+
+	return n, next
+}
+
+// splitIndex finds the position where a page will fill a given threshold.
+// It returns the index as well as the size of the first page.
+// This should only be called from split().
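+//
+// For example, with a 4096-byte page and a fill percent of 0.5, the caller
+// passes a threshold of 2048 bytes, so the returned index marks roughly half
+// a page of serialized inodes (subject to the minKeysPerPage floor).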
+func (n *node) splitIndex(threshold int) (index, sz int) { + sz = pageHeaderSize + + // Loop until we only have the minimum number of keys required for the second page. + for i := 0; i < len(n.inodes)-minKeysPerPage; i++ { + index = i + inode := n.inodes[i] + elsize := n.pageElementSize() + len(inode.key) + len(inode.value) + + // If we have at least the minimum number of keys and adding another + // node would put us over the threshold then exit and return. + if i >= minKeysPerPage && sz+elsize > threshold { + break + } + + // Add the element size to the total size. + sz += elsize + } + + return +} + +// spill writes the nodes to dirty pages and splits nodes as it goes. +// Returns an error if dirty pages cannot be allocated. +func (n *node) spill() error { + var tx = n.bucket.tx + if n.spilled { + return nil + } + + // Spill child nodes first. Child nodes can materialize sibling nodes in + // the case of split-merge so we cannot use a range loop. We have to check + // the children size on every loop iteration. + sort.Sort(n.children) + for i := 0; i < len(n.children); i++ { + if err := n.children[i].spill(); err != nil { + return err + } + } + + // We no longer need the child list because it's only used for spill tracking. + n.children = nil + + // Split nodes into appropriate sizes. The first node will always be n. + var nodes = n.split(tx.db.pageSize) + for _, node := range nodes { + // Add node's page to the freelist if it's not new. + if node.pgid > 0 { + tx.db.freelist.free(tx.meta.txid, tx.page(node.pgid)) + node.pgid = 0 + } + + // Allocate contiguous space for the node. + p, err := tx.allocate((node.size() / tx.db.pageSize) + 1) + if err != nil { + return err + } + + // Write the node. + if p.id >= tx.meta.pgid { + panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", p.id, tx.meta.pgid)) + } + node.pgid = p.id + node.write(p) + node.spilled = true + + // Insert into parent inodes. + if node.parent != nil { + var key = node.key + if key == nil { + key = node.inodes[0].key + } + + node.parent.put(key, node.inodes[0].key, nil, node.pgid, 0) + node.key = node.inodes[0].key + _assert(len(node.key) > 0, "spill: zero-length node key") + } + + // Update the statistics. + tx.stats.Spill++ + } + + // If the root node split and created a new root then we need to spill that + // as well. We'll clear out the children to make sure it doesn't try to respill. + if n.parent != nil && n.parent.pgid == 0 { + n.children = nil + return n.parent.spill() + } + + return nil +} + +// rebalance attempts to combine the node with sibling nodes if the node fill +// size is below a threshold or if there are not enough keys. +func (n *node) rebalance() { + if !n.unbalanced { + return + } + n.unbalanced = false + + // Update statistics. + n.bucket.tx.stats.Rebalance++ + + // Ignore if node is above threshold (25%) and has enough keys. + var threshold = n.bucket.tx.db.pageSize / 4 + if n.size() > threshold && len(n.inodes) > n.minKeys() { + return + } + + // Root node has special handling. + if n.parent == nil { + // If root node is a branch and only has one node then collapse it. + if !n.isLeaf && len(n.inodes) == 1 { + // Move root's child up. + child := n.bucket.node(n.inodes[0].pgid, n) + n.isLeaf = child.isLeaf + n.inodes = child.inodes[:] + n.children = child.children + + // Reparent all child nodes being moved. + for _, inode := range n.inodes { + if child, ok := n.bucket.nodes[inode.pgid]; ok { + child.parent = n + } + } + + // Remove old child. 
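+			// The promoted child's inodes now live in the root, so detach
+			// the child from the node cache and return its page to the
+			// freelist.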
+ child.parent = nil + delete(n.bucket.nodes, child.pgid) + child.free() + } + + return + } + + // If node has no keys then just remove it. + if n.numChildren() == 0 { + n.parent.del(n.key) + n.parent.removeChild(n) + delete(n.bucket.nodes, n.pgid) + n.free() + n.parent.rebalance() + return + } + + _assert(n.parent.numChildren() > 1, "parent must have at least 2 children") + + // Destination node is right sibling if idx == 0, otherwise left sibling. + var target *node + var useNextSibling = (n.parent.childIndex(n) == 0) + if useNextSibling { + target = n.nextSibling() + } else { + target = n.prevSibling() + } + + // If both this node and the target node are too small then merge them. + if useNextSibling { + // Reparent all child nodes being moved. + for _, inode := range target.inodes { + if child, ok := n.bucket.nodes[inode.pgid]; ok { + child.parent.removeChild(child) + child.parent = n + child.parent.children = append(child.parent.children, child) + } + } + + // Copy over inodes from target and remove target. + n.inodes = append(n.inodes, target.inodes...) + n.parent.del(target.key) + n.parent.removeChild(target) + delete(n.bucket.nodes, target.pgid) + target.free() + } else { + // Reparent all child nodes being moved. + for _, inode := range n.inodes { + if child, ok := n.bucket.nodes[inode.pgid]; ok { + child.parent.removeChild(child) + child.parent = target + child.parent.children = append(child.parent.children, child) + } + } + + // Copy over inodes to target and remove node. + target.inodes = append(target.inodes, n.inodes...) + n.parent.del(n.key) + n.parent.removeChild(n) + delete(n.bucket.nodes, n.pgid) + n.free() + } + + // Either this node or the target node was deleted from the parent so rebalance it. + n.parent.rebalance() +} + +// removes a node from the list of in-memory children. +// This does not affect the inodes. +func (n *node) removeChild(target *node) { + for i, child := range n.children { + if child == target { + n.children = append(n.children[:i], n.children[i+1:]...) + return + } + } +} + +// dereference causes the node to copy all its inode key/value references to heap memory. +// This is required when the mmap is reallocated so inodes are not pointing to stale data. +func (n *node) dereference() { + if n.key != nil { + key := make([]byte, len(n.key)) + copy(key, n.key) + n.key = key + _assert(n.pgid == 0 || len(n.key) > 0, "dereference: zero-length node key on existing node") + } + + for i := range n.inodes { + inode := &n.inodes[i] + + key := make([]byte, len(inode.key)) + copy(key, inode.key) + inode.key = key + _assert(len(inode.key) > 0, "dereference: zero-length inode key") + + value := make([]byte, len(inode.value)) + copy(value, inode.value) + inode.value = value + } + + // Recursively dereference children. + for _, child := range n.children { + child.dereference() + } + + // Update statistics. + n.bucket.tx.stats.NodeDeref++ +} + +// free adds the node's underlying page to the freelist. +func (n *node) free() { + if n.pgid != 0 { + n.bucket.tx.db.freelist.free(n.bucket.tx.meta.txid, n.bucket.tx.page(n.pgid)) + n.pgid = 0 + } +} + +// dump writes the contents of the node to STDERR for debugging purposes. +/* +func (n *node) dump() { + // Write node header. + var typ = "branch" + if n.isLeaf { + typ = "leaf" + } + warnf("[NODE %d {type=%s count=%d}]", n.pgid, typ, len(n.inodes)) + + // Write out abbreviated version of each item. 
+ for _, item := range n.inodes { + if n.isLeaf { + if item.flags&bucketLeafFlag != 0 { + bucket := (*bucket)(unsafe.Pointer(&item.value[0])) + warnf("+L %08x -> (bucket root=%d)", trunc(item.key, 4), bucket.root) + } else { + warnf("+L %08x -> %08x", trunc(item.key, 4), trunc(item.value, 4)) + } + } else { + warnf("+B %08x -> pgid=%d", trunc(item.key, 4), item.pgid) + } + } + warn("") +} +*/ + +type nodes []*node + +func (s nodes) Len() int { return len(s) } +func (s nodes) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s nodes) Less(i, j int) bool { return bytes.Compare(s[i].inodes[0].key, s[j].inodes[0].key) == -1 } + +// inode represents an internal node inside of a node. +// It can be used to point to elements in a page or point +// to an element which hasn't been added to a page yet. +type inode struct { + flags uint32 + pgid pgid + key []byte + value []byte +} + +type inodes []inode diff --git a/vendor/github.com/boltdb/bolt/page.go b/vendor/github.com/boltdb/bolt/page.go new file mode 100644 index 00000000..818aa1b1 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/page.go @@ -0,0 +1,172 @@ +package bolt + +import ( + "fmt" + "os" + "sort" + "unsafe" +) + +const pageHeaderSize = int(unsafe.Offsetof(((*page)(nil)).ptr)) + +const minKeysPerPage = 2 + +const branchPageElementSize = int(unsafe.Sizeof(branchPageElement{})) +const leafPageElementSize = int(unsafe.Sizeof(leafPageElement{})) + +const ( + branchPageFlag = 0x01 + leafPageFlag = 0x02 + metaPageFlag = 0x04 + freelistPageFlag = 0x10 +) + +const ( + bucketLeafFlag = 0x01 +) + +type pgid uint64 + +type page struct { + id pgid + flags uint16 + count uint16 + overflow uint32 + ptr uintptr +} + +// typ returns a human readable page type string used for debugging. +func (p *page) typ() string { + if (p.flags & branchPageFlag) != 0 { + return "branch" + } else if (p.flags & leafPageFlag) != 0 { + return "leaf" + } else if (p.flags & metaPageFlag) != 0 { + return "meta" + } else if (p.flags & freelistPageFlag) != 0 { + return "freelist" + } + return fmt.Sprintf("unknown<%02x>", p.flags) +} + +// meta returns a pointer to the metadata section of the page. +func (p *page) meta() *meta { + return (*meta)(unsafe.Pointer(&p.ptr)) +} + +// leafPageElement retrieves the leaf node by index +func (p *page) leafPageElement(index uint16) *leafPageElement { + n := &((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[index] + return n +} + +// leafPageElements retrieves a list of leaf nodes. +func (p *page) leafPageElements() []leafPageElement { + return ((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[:] +} + +// branchPageElement retrieves the branch node by index +func (p *page) branchPageElement(index uint16) *branchPageElement { + return &((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[index] +} + +// branchPageElements retrieves a list of branch nodes. +func (p *page) branchPageElements() []branchPageElement { + return ((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[:] +} + +// dump writes n bytes of the page to STDERR as hex output. +func (p *page) hexdump(n int) { + buf := (*[maxAllocSize]byte)(unsafe.Pointer(p))[:n] + fmt.Fprintf(os.Stderr, "%x\n", buf) +} + +type pages []*page + +func (s pages) Len() int { return len(s) } +func (s pages) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s pages) Less(i, j int) bool { return s[i].id < s[j].id } + +// branchPageElement represents a node on a branch page. 
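+// pos is the byte offset from the start of this element's header to the
+// element's key data, as read back by key().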
+type branchPageElement struct { + pos uint32 + ksize uint32 + pgid pgid +} + +// key returns a byte slice of the node key. +func (n *branchPageElement) key() []byte { + buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) + return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize] +} + +// leafPageElement represents a node on a leaf page. +type leafPageElement struct { + flags uint32 + pos uint32 + ksize uint32 + vsize uint32 +} + +// key returns a byte slice of the node key. +func (n *leafPageElement) key() []byte { + buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) + return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize] +} + +// value returns a byte slice of the node value. +func (n *leafPageElement) value() []byte { + buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) + return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos+n.ksize]))[:n.vsize] +} + +// PageInfo represents human readable information about a page. +type PageInfo struct { + ID int + Type string + Count int + OverflowCount int +} + +type pgids []pgid + +func (s pgids) Len() int { return len(s) } +func (s pgids) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s pgids) Less(i, j int) bool { return s[i] < s[j] } + +// merge returns the sorted union of a and b. +func (a pgids) merge(b pgids) pgids { + // Return the opposite slice if one is nil. + if len(a) == 0 { + return b + } else if len(b) == 0 { + return a + } + + // Create a list to hold all elements from both lists. + merged := make(pgids, 0, len(a)+len(b)) + + // Assign lead to the slice with a lower starting value, follow to the higher value. + lead, follow := a, b + if b[0] < a[0] { + lead, follow = b, a + } + + // Continue while there are elements in the lead. + for len(lead) > 0 { + // Merge largest prefix of lead that is ahead of follow[0]. + n := sort.Search(len(lead), func(i int) bool { return lead[i] > follow[0] }) + merged = append(merged, lead[:n]...) + if n >= len(lead) { + break + } + + // Swap lead and follow. + lead, follow = follow, lead[n:] + } + + // Append what's left in follow. + merged = append(merged, follow...) + + return merged +} diff --git a/vendor/github.com/boltdb/bolt/tx.go b/vendor/github.com/boltdb/bolt/tx.go new file mode 100644 index 00000000..b8510fdb --- /dev/null +++ b/vendor/github.com/boltdb/bolt/tx.go @@ -0,0 +1,666 @@ +package bolt + +import ( + "fmt" + "io" + "os" + "sort" + "strings" + "time" + "unsafe" +) + +// txid represents the internal transaction identifier. +type txid uint64 + +// Tx represents a read-only or read/write transaction on the database. +// Read-only transactions can be used for retrieving values for keys and creating cursors. +// Read/write transactions can create and remove buckets and create and remove keys. +// +// IMPORTANT: You must commit or rollback transactions when you are done with +// them. Pages can not be reclaimed by the writer until no more transactions +// are using them. A long running read transaction can cause the database to +// quickly grow. +type Tx struct { + writable bool + managed bool + db *DB + meta *meta + root Bucket + pages map[pgid]*page + stats TxStats + commitHandlers []func() + + // WriteFlag specifies the flag for write-related methods like WriteTo(). + // Tx opens the database file with the specified flag to copy the data. + // + // By default, the flag is unset, which works well for mostly in-memory + // workloads. For databases that are much larger than available RAM, + // set the flag to syscall.O_DIRECT to avoid trashing the page cache. 
+ WriteFlag int +} + +// init initializes the transaction. +func (tx *Tx) init(db *DB) { + tx.db = db + tx.pages = nil + + // Copy the meta page since it can be changed by the writer. + tx.meta = &meta{} + db.meta().copy(tx.meta) + + // Copy over the root bucket. + tx.root = newBucket(tx) + tx.root.bucket = &bucket{} + *tx.root.bucket = tx.meta.root + + // Increment the transaction id and add a page cache for writable transactions. + if tx.writable { + tx.pages = make(map[pgid]*page) + tx.meta.txid += txid(1) + } +} + +// ID returns the transaction id. +func (tx *Tx) ID() int { + return int(tx.meta.txid) +} + +// DB returns a reference to the database that created the transaction. +func (tx *Tx) DB() *DB { + return tx.db +} + +// Size returns current database size in bytes as seen by this transaction. +func (tx *Tx) Size() int64 { + return int64(tx.meta.pgid) * int64(tx.db.pageSize) +} + +// Writable returns whether the transaction can perform write operations. +func (tx *Tx) Writable() bool { + return tx.writable +} + +// Cursor creates a cursor associated with the root bucket. +// All items in the cursor will return a nil value because all root bucket keys point to buckets. +// The cursor is only valid as long as the transaction is open. +// Do not use a cursor after the transaction is closed. +func (tx *Tx) Cursor() *Cursor { + return tx.root.Cursor() +} + +// Stats retrieves a copy of the current transaction statistics. +func (tx *Tx) Stats() TxStats { + return tx.stats +} + +// Bucket retrieves a bucket by name. +// Returns nil if the bucket does not exist. +// The bucket instance is only valid for the lifetime of the transaction. +func (tx *Tx) Bucket(name []byte) *Bucket { + return tx.root.Bucket(name) +} + +// CreateBucket creates a new bucket. +// Returns an error if the bucket already exists, if the bucket name is blank, or if the bucket name is too long. +// The bucket instance is only valid for the lifetime of the transaction. +func (tx *Tx) CreateBucket(name []byte) (*Bucket, error) { + return tx.root.CreateBucket(name) +} + +// CreateBucketIfNotExists creates a new bucket if it doesn't already exist. +// Returns an error if the bucket name is blank, or if the bucket name is too long. +// The bucket instance is only valid for the lifetime of the transaction. +func (tx *Tx) CreateBucketIfNotExists(name []byte) (*Bucket, error) { + return tx.root.CreateBucketIfNotExists(name) +} + +// DeleteBucket deletes a bucket. +// Returns an error if the bucket cannot be found or if the key represents a non-bucket value. +func (tx *Tx) DeleteBucket(name []byte) error { + return tx.root.DeleteBucket(name) +} + +// ForEach executes a function for each bucket in the root. +// If the provided function returns an error then the iteration is stopped and +// the error is returned to the caller. +func (tx *Tx) ForEach(fn func(name []byte, b *Bucket) error) error { + return tx.root.ForEach(func(k, v []byte) error { + if err := fn(k, tx.root.Bucket(k)); err != nil { + return err + } + return nil + }) +} + +// OnCommit adds a handler function to be executed after the transaction successfully commits. +func (tx *Tx) OnCommit(fn func()) { + tx.commitHandlers = append(tx.commitHandlers, fn) +} + +// Commit writes all changes to disk and updates the meta page. +// Returns an error if a disk write error occurs, or if Commit is +// called on a read-only transaction. 
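+//
+// A minimal sketch of manual transaction use (illustrative only; assumes an
+// open *DB named db):
+//
+//	tx, err := db.Begin(true)
+//	if err != nil {
+//		return err
+//	}
+//	defer tx.Rollback()
+//
+//	if _, err := tx.CreateBucketIfNotExists([]byte("widgets")); err != nil {
+//		return err
+//	}
+//	return tx.Commit()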
+func (tx *Tx) Commit() error { + _assert(!tx.managed, "managed tx commit not allowed") + if tx.db == nil { + return ErrTxClosed + } else if !tx.writable { + return ErrTxNotWritable + } + + // TODO(benbjohnson): Use vectorized I/O to write out dirty pages. + + // Rebalance nodes which have had deletions. + var startTime = time.Now() + tx.root.rebalance() + if tx.stats.Rebalance > 0 { + tx.stats.RebalanceTime += time.Since(startTime) + } + + // spill data onto dirty pages. + startTime = time.Now() + if err := tx.root.spill(); err != nil { + tx.rollback() + return err + } + tx.stats.SpillTime += time.Since(startTime) + + // Free the old root bucket. + tx.meta.root.root = tx.root.root + + opgid := tx.meta.pgid + + // Free the freelist and allocate new pages for it. This will overestimate + // the size of the freelist but not underestimate the size (which would be bad). + tx.db.freelist.free(tx.meta.txid, tx.db.page(tx.meta.freelist)) + p, err := tx.allocate((tx.db.freelist.size() / tx.db.pageSize) + 1) + if err != nil { + tx.rollback() + return err + } + if err := tx.db.freelist.write(p); err != nil { + tx.rollback() + return err + } + tx.meta.freelist = p.id + + // If the high water mark has moved up then attempt to grow the database. + if tx.meta.pgid > opgid { + if err := tx.db.grow(int(tx.meta.pgid+1) * tx.db.pageSize); err != nil { + tx.rollback() + return err + } + } + + // Write dirty pages to disk. + startTime = time.Now() + if err := tx.write(); err != nil { + tx.rollback() + return err + } + + // If strict mode is enabled then perform a consistency check. + // Only the first consistency error is reported in the panic. + if tx.db.StrictMode { + ch := tx.Check() + var errs []string + for { + err, ok := <-ch + if !ok { + break + } + errs = append(errs, err.Error()) + } + if len(errs) > 0 { + panic("check fail: " + strings.Join(errs, "\n")) + } + } + + // Write meta to disk. + if err := tx.writeMeta(); err != nil { + tx.rollback() + return err + } + tx.stats.WriteTime += time.Since(startTime) + + // Finalize the transaction. + tx.close() + + // Execute commit handlers now that the locks have been removed. + for _, fn := range tx.commitHandlers { + fn() + } + + return nil +} + +// Rollback closes the transaction and ignores all previous updates. Read-only +// transactions must be rolled back and not committed. +func (tx *Tx) Rollback() error { + _assert(!tx.managed, "managed tx rollback not allowed") + if tx.db == nil { + return ErrTxClosed + } + tx.rollback() + return nil +} + +func (tx *Tx) rollback() { + if tx.db == nil { + return + } + if tx.writable { + tx.db.freelist.rollback(tx.meta.txid) + tx.db.freelist.reload(tx.db.page(tx.db.meta().freelist)) + } + tx.close() +} + +func (tx *Tx) close() { + if tx.db == nil { + return + } + if tx.writable { + // Grab freelist stats. + var freelistFreeN = tx.db.freelist.free_count() + var freelistPendingN = tx.db.freelist.pending_count() + var freelistAlloc = tx.db.freelist.size() + + // Remove transaction ref & writer lock. + tx.db.rwtx = nil + tx.db.rwlock.Unlock() + + // Merge statistics. + tx.db.statlock.Lock() + tx.db.stats.FreePageN = freelistFreeN + tx.db.stats.PendingPageN = freelistPendingN + tx.db.stats.FreeAlloc = (freelistFreeN + freelistPendingN) * tx.db.pageSize + tx.db.stats.FreelistInuse = freelistAlloc + tx.db.stats.TxStats.add(&tx.stats) + tx.db.statlock.Unlock() + } else { + tx.db.removeTx(tx) + } + + // Clear all references. 
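+	// From here on any further use of this Tx fails fast: Commit and
+	// Rollback observe tx.db == nil and return ErrTxClosed.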
+ tx.db = nil + tx.meta = nil + tx.root = Bucket{tx: tx} + tx.pages = nil +} + +// Copy writes the entire database to a writer. +// This function exists for backwards compatibility. Use WriteTo() instead. +func (tx *Tx) Copy(w io.Writer) error { + _, err := tx.WriteTo(w) + return err +} + +// WriteTo writes the entire database to a writer. +// If err == nil then exactly tx.Size() bytes will be written into the writer. +func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) { + // Attempt to open reader with WriteFlag + f, err := os.OpenFile(tx.db.path, os.O_RDONLY|tx.WriteFlag, 0) + if err != nil { + return 0, err + } + defer func() { _ = f.Close() }() + + // Generate a meta page. We use the same page data for both meta pages. + buf := make([]byte, tx.db.pageSize) + page := (*page)(unsafe.Pointer(&buf[0])) + page.flags = metaPageFlag + *page.meta() = *tx.meta + + // Write meta 0. + page.id = 0 + page.meta().checksum = page.meta().sum64() + nn, err := w.Write(buf) + n += int64(nn) + if err != nil { + return n, fmt.Errorf("meta 0 copy: %s", err) + } + + // Write meta 1 with a lower transaction id. + page.id = 1 + page.meta().txid -= 1 + page.meta().checksum = page.meta().sum64() + nn, err = w.Write(buf) + n += int64(nn) + if err != nil { + return n, fmt.Errorf("meta 1 copy: %s", err) + } + + // Move past the meta pages in the file. + if _, err := f.Seek(int64(tx.db.pageSize*2), os.SEEK_SET); err != nil { + return n, fmt.Errorf("seek: %s", err) + } + + // Copy data pages. + wn, err := io.CopyN(w, f, tx.Size()-int64(tx.db.pageSize*2)) + n += wn + if err != nil { + return n, err + } + + return n, f.Close() +} + +// CopyFile copies the entire database to file at the given path. +// A reader transaction is maintained during the copy so it is safe to continue +// using the database while a copy is in progress. +func (tx *Tx) CopyFile(path string, mode os.FileMode) error { + f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, mode) + if err != nil { + return err + } + + err = tx.Copy(f) + if err != nil { + _ = f.Close() + return err + } + return f.Close() +} + +// Check performs several consistency checks on the database for this transaction. +// An error is returned if any inconsistency is found. +// +// It can be safely run concurrently on a writable transaction. However, this +// incurs a high cost for large databases and databases with a lot of subbuckets +// because of caching. This overhead can be removed if running on a read-only +// transaction, however, it is not safe to execute other writer transactions at +// the same time. +func (tx *Tx) Check() <-chan error { + ch := make(chan error) + go tx.check(ch) + return ch +} + +func (tx *Tx) check(ch chan error) { + // Check if any pages are double freed. + freed := make(map[pgid]bool) + for _, id := range tx.db.freelist.all() { + if freed[id] { + ch <- fmt.Errorf("page %d: already freed", id) + } + freed[id] = true + } + + // Track every reachable page. + reachable := make(map[pgid]*page) + reachable[0] = tx.page(0) // meta0 + reachable[1] = tx.page(1) // meta1 + for i := uint32(0); i <= tx.page(tx.meta.freelist).overflow; i++ { + reachable[tx.meta.freelist+pgid(i)] = tx.page(tx.meta.freelist) + } + + // Recursively check buckets. + tx.checkBucket(&tx.root, reachable, freed, ch) + + // Ensure all pages below high water mark are either reachable or freed. 
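+	// meta.pgid is the high water mark, so ids in [0, meta.pgid) cover every
+	// page the file has ever allocated; each must be reachable or freed.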
+ for i := pgid(0); i < tx.meta.pgid; i++ { + _, isReachable := reachable[i] + if !isReachable && !freed[i] { + ch <- fmt.Errorf("page %d: unreachable unfreed", int(i)) + } + } + + // Close the channel to signal completion. + close(ch) +} + +func (tx *Tx) checkBucket(b *Bucket, reachable map[pgid]*page, freed map[pgid]bool, ch chan error) { + // Ignore inline buckets. + if b.root == 0 { + return + } + + // Check every page used by this bucket. + b.tx.forEachPage(b.root, 0, func(p *page, _ int) { + if p.id > tx.meta.pgid { + ch <- fmt.Errorf("page %d: out of bounds: %d", int(p.id), int(b.tx.meta.pgid)) + } + + // Ensure each page is only referenced once. + for i := pgid(0); i <= pgid(p.overflow); i++ { + var id = p.id + i + if _, ok := reachable[id]; ok { + ch <- fmt.Errorf("page %d: multiple references", int(id)) + } + reachable[id] = p + } + + // We should only encounter un-freed leaf and branch pages. + if freed[p.id] { + ch <- fmt.Errorf("page %d: reachable freed", int(p.id)) + } else if (p.flags&branchPageFlag) == 0 && (p.flags&leafPageFlag) == 0 { + ch <- fmt.Errorf("page %d: invalid type: %s", int(p.id), p.typ()) + } + }) + + // Check each bucket within this bucket. + _ = b.ForEach(func(k, v []byte) error { + if child := b.Bucket(k); child != nil { + tx.checkBucket(child, reachable, freed, ch) + } + return nil + }) +} + +// allocate returns a contiguous block of memory starting at a given page. +func (tx *Tx) allocate(count int) (*page, error) { + p, err := tx.db.allocate(count) + if err != nil { + return nil, err + } + + // Save to our page cache. + tx.pages[p.id] = p + + // Update statistics. + tx.stats.PageCount++ + tx.stats.PageAlloc += count * tx.db.pageSize + + return p, nil +} + +// write writes any dirty pages to disk. +func (tx *Tx) write() error { + // Sort pages by id. + pages := make(pages, 0, len(tx.pages)) + for _, p := range tx.pages { + pages = append(pages, p) + } + sort.Sort(pages) + + // Write pages to disk in order. + for _, p := range pages { + size := (int(p.overflow) + 1) * tx.db.pageSize + offset := int64(p.id) * int64(tx.db.pageSize) + + // Write out page in "max allocation" sized chunks. + ptr := (*[maxAllocSize]byte)(unsafe.Pointer(p)) + for { + // Limit our write to our max allocation size. + sz := size + if sz > maxAllocSize-1 { + sz = maxAllocSize - 1 + } + + // Write chunk to disk. + buf := ptr[:sz] + if _, err := tx.db.ops.writeAt(buf, offset); err != nil { + return err + } + + // Update statistics. + tx.stats.Write++ + + // Exit inner for loop if we've written all the chunks. + size -= sz + if size == 0 { + break + } + + // Otherwise move offset forward and move pointer to next chunk. + offset += int64(sz) + ptr = (*[maxAllocSize]byte)(unsafe.Pointer(&ptr[sz])) + } + } + + // Ignore file sync if flag is set on DB. + if !tx.db.NoSync || IgnoreNoSync { + if err := fdatasync(tx.db); err != nil { + return err + } + } + + // Clear out page cache. + tx.pages = make(map[pgid]*page) + + return nil +} + +// writeMeta writes the meta to the disk. +func (tx *Tx) writeMeta() error { + // Create a temporary buffer for the meta page. + buf := make([]byte, tx.db.pageSize) + p := tx.db.pageInBuffer(buf, 0) + tx.meta.write(p) + + // Write the meta page to file. + if _, err := tx.db.ops.writeAt(buf, int64(p.id)*int64(tx.db.pageSize)); err != nil { + return err + } + if !tx.db.NoSync || IgnoreNoSync { + if err := fdatasync(tx.db); err != nil { + return err + } + } + + // Update statistics. 
+	tx.stats.Write++
+
+	return nil
+}
+
+// page returns a reference to the page with a given id.
+// If the page has been written to then a temporary buffered page is returned.
+func (tx *Tx) page(id pgid) *page {
+	// Check the dirty pages first.
+	if tx.pages != nil {
+		if p, ok := tx.pages[id]; ok {
+			return p
+		}
+	}
+
+	// Otherwise return directly from the mmap.
+	return tx.db.page(id)
+}
+
+// forEachPage iterates over every page within a given page and executes a function.
+func (tx *Tx) forEachPage(pgid pgid, depth int, fn func(*page, int)) {
+	p := tx.page(pgid)
+
+	// Execute function.
+	fn(p, depth)
+
+	// Recursively loop over children.
+	if (p.flags & branchPageFlag) != 0 {
+		for i := 0; i < int(p.count); i++ {
+			elem := p.branchPageElement(uint16(i))
+			tx.forEachPage(elem.pgid, depth+1, fn)
+		}
+	}
+}
+
+// Page returns page information for a given page number.
+// This is only safe for concurrent use when used by a writable transaction.
+func (tx *Tx) Page(id int) (*PageInfo, error) {
+	if tx.db == nil {
+		return nil, ErrTxClosed
+	} else if pgid(id) >= tx.meta.pgid {
+		return nil, nil
+	}
+
+	// Build the page info.
+	p := tx.db.page(pgid(id))
+	info := &PageInfo{
+		ID:            id,
+		Count:         int(p.count),
+		OverflowCount: int(p.overflow),
+	}
+
+	// Determine the type (or if it's free).
+	if tx.db.freelist.freed(pgid(id)) {
+		info.Type = "free"
+	} else {
+		info.Type = p.typ()
+	}
+
+	return info, nil
+}
+
+// TxStats represents statistics about the actions performed by the transaction.
+type TxStats struct {
+	// Page statistics.
+	PageCount int // number of page allocations
+	PageAlloc int // total bytes allocated
+
+	// Cursor statistics.
+	CursorCount int // number of cursors created
+
+	// Node statistics
+	NodeCount int // number of node allocations
+	NodeDeref int // number of node dereferences
+
+	// Rebalance statistics.
+	Rebalance     int           // number of node rebalances
+	RebalanceTime time.Duration // total time spent rebalancing
+
+	// Split/Spill statistics.
+	Split     int           // number of nodes split
+	Spill     int           // number of nodes spilled
+	SpillTime time.Duration // total time spent spilling
+
+	// Write statistics.
+	Write     int           // number of writes performed
+	WriteTime time.Duration // total time spent writing to disk
+}
+
+func (s *TxStats) add(other *TxStats) {
+	s.PageCount += other.PageCount
+	s.PageAlloc += other.PageAlloc
+	s.CursorCount += other.CursorCount
+	s.NodeCount += other.NodeCount
+	s.NodeDeref += other.NodeDeref
+	s.Rebalance += other.Rebalance
+	s.RebalanceTime += other.RebalanceTime
+	s.Split += other.Split
+	s.Spill += other.Spill
+	s.SpillTime += other.SpillTime
+	s.Write += other.Write
+	s.WriteTime += other.WriteTime
+}
+
+// Sub calculates and returns the difference between two sets of transaction stats.
+// This is useful when obtaining stats at two different points in time and
+func (s *TxStats) Sub(other *TxStats) TxStats { + var diff TxStats + diff.PageCount = s.PageCount - other.PageCount + diff.PageAlloc = s.PageAlloc - other.PageAlloc + diff.CursorCount = s.CursorCount - other.CursorCount + diff.NodeCount = s.NodeCount - other.NodeCount + diff.NodeDeref = s.NodeDeref - other.NodeDeref + diff.Rebalance = s.Rebalance - other.Rebalance + diff.RebalanceTime = s.RebalanceTime - other.RebalanceTime + diff.Split = s.Split - other.Split + diff.Spill = s.Spill - other.Spill + diff.SpillTime = s.SpillTime - other.SpillTime + diff.Write = s.Write - other.Write + diff.WriteTime = s.WriteTime - other.WriteTime + return diff +} diff --git a/vendor/github.com/cloudfoundry/gosigar/.gitignore b/vendor/github.com/cloudfoundry/gosigar/.gitignore new file mode 100644 index 00000000..8000dd9d --- /dev/null +++ b/vendor/github.com/cloudfoundry/gosigar/.gitignore @@ -0,0 +1 @@ +.vagrant diff --git a/vendor/github.com/cloudfoundry/gosigar/.travis.yml b/vendor/github.com/cloudfoundry/gosigar/.travis.yml new file mode 100644 index 00000000..2a9c5d0c --- /dev/null +++ b/vendor/github.com/cloudfoundry/gosigar/.travis.yml @@ -0,0 +1,8 @@ +language: go + +go: + - 1.2 + +install: + - 'go install github.com/onsi/ginkgo/ginkgo' +script: 'ginkgo -r' diff --git a/vendor/github.com/cloudfoundry/gosigar/LICENSE b/vendor/github.com/cloudfoundry/gosigar/LICENSE new file mode 100644 index 00000000..11069edd --- /dev/null +++ b/vendor/github.com/cloudfoundry/gosigar/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/github.com/cloudfoundry/gosigar/NOTICE b/vendor/github.com/cloudfoundry/gosigar/NOTICE new file mode 100644 index 00000000..fda553b5 --- /dev/null +++ b/vendor/github.com/cloudfoundry/gosigar/NOTICE @@ -0,0 +1,9 @@ +Copyright (c) [2009-2011] VMware, Inc. All Rights Reserved. + +This product is licensed to you under the Apache License, Version 2.0 (the "License"). +You may not use this product except in compliance with the License. + +This product includes a number of subcomponents with +separate copyright notices and license terms. Your use of these +subcomponents is subject to the terms and conditions of the +subcomponent's license, as noted in the LICENSE file. 
\ No newline at end of file diff --git a/vendor/github.com/cloudfoundry/gosigar/README.md b/vendor/github.com/cloudfoundry/gosigar/README.md new file mode 100644 index 00000000..90d51f9b --- /dev/null +++ b/vendor/github.com/cloudfoundry/gosigar/README.md @@ -0,0 +1,22 @@ +# Go sigar + +## Overview + +Go sigar is a golang implementation of the +[sigar API](https://github.com/hyperic/sigar). The Go version of +sigar has a very similar interface, but is being written from scratch +in pure go/cgo, rather than cgo bindings for libsigar. + +## Test drive + + $ go get github.com/cloudfoundry/gosigar + $ cd $GOPATH/src/github.com/cloudfoundry/gosigar/examples + $ go run uptime.go + +## Supported platforms + +Currently targeting modern flavors of darwin and linux. + +## License + +Apache 2.0 diff --git a/vendor/github.com/cloudfoundry/gosigar/Vagrantfile b/vendor/github.com/cloudfoundry/gosigar/Vagrantfile new file mode 100644 index 00000000..6fd990c1 --- /dev/null +++ b/vendor/github.com/cloudfoundry/gosigar/Vagrantfile @@ -0,0 +1,25 @@ +# Vagrantfile API/syntax version. Don't touch unless you know what you're doing! +VAGRANTFILE_API_VERSION = "2" + +Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| + config.vm.box = "hashicorp/precise64" + config.vm.provision "shell", inline: "mkdir -p /home/vagrant/go" + config.vm.synced_folder ".", "/home/vagrant/go/src/github.com/cloudfoundry/gosigar" + config.vm.provision "shell", inline: "chown -R vagrant:vagrant /home/vagrant/go" + install_go = <<-BASH + set -e + +if [ ! -d "/usr/local/go" ]; then + cd /tmp && wget https://storage.googleapis.com/golang/go1.3.3.linux-amd64.tar.gz + cd /usr/local + tar xvzf /tmp/go1.3.3.linux-amd64.tar.gz + echo 'export GOPATH=/home/vagrant/go; export PATH=/usr/local/go/bin:$PATH:$GOPATH/bin' >> /home/vagrant/.bashrc +fi +export GOPATH=/home/vagrant/go +export PATH=/usr/local/go/bin:$PATH:$GOPATH/bin +/usr/local/go/bin/go get -u github.com/onsi/ginkgo/ginkgo +/usr/local/go/bin/go get -u github.com/onsi/gomega; +BASH + config.vm.provision "shell", inline: 'apt-get install -y git-core' + config.vm.provision "shell", inline: install_go +end diff --git a/vendor/github.com/cloudfoundry/gosigar/concrete_sigar.go b/vendor/github.com/cloudfoundry/gosigar/concrete_sigar.go new file mode 100644 index 00000000..0e80aa4b --- /dev/null +++ b/vendor/github.com/cloudfoundry/gosigar/concrete_sigar.go @@ -0,0 +1,69 @@ +package sigar + +import ( + "time" +) + +type ConcreteSigar struct{} + +func (c *ConcreteSigar) CollectCpuStats(collectionInterval time.Duration) (<-chan Cpu, chan<- struct{}) { + // samplesCh is buffered to 1 value to immediately return first CPU sample + samplesCh := make(chan Cpu, 1) + + stopCh := make(chan struct{}) + + go func() { + var cpuUsage Cpu + + // Immediately provide non-delta value. + // samplesCh is buffered to 1 value, so it will not block. 
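		// The first sample sent below is cumulative (CPU ticks since boot);
		// only the subsequent ticker-driven sends are deltas over
		// collectionInterval.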
+		cpuUsage.Get()
+		samplesCh <- cpuUsage
+
+		ticker := time.NewTicker(collectionInterval)
+
+		for {
+			select {
+			case <-ticker.C:
+				previousCpuUsage := cpuUsage
+
+				cpuUsage.Get()
+
+				select {
+				case samplesCh <- cpuUsage.Delta(previousCpuUsage):
+				default:
+					// Include default to avoid channel blocking
+				}
+
+			case <-stopCh:
+				return
+			}
+		}
+	}()
+
+	return samplesCh, stopCh
+}
+
+func (c *ConcreteSigar) GetLoadAverage() (LoadAverage, error) {
+	l := LoadAverage{}
+	err := l.Get()
+	return l, err
+}
+
+func (c *ConcreteSigar) GetMem() (Mem, error) {
+	m := Mem{}
+	err := m.Get()
+	return m, err
+}
+
+func (c *ConcreteSigar) GetSwap() (Swap, error) {
+	s := Swap{}
+	err := s.Get()
+	return s, err
+}
+
+func (c *ConcreteSigar) GetFileSystemUsage(path string) (FileSystemUsage, error) {
+	f := FileSystemUsage{}
+	err := f.Get(path)
+	return f, err
+}
diff --git a/vendor/github.com/cloudfoundry/gosigar/sigar_darwin.go b/vendor/github.com/cloudfoundry/gosigar/sigar_darwin.go
new file mode 100644
index 00000000..e3a8c4b9
--- /dev/null
+++ b/vendor/github.com/cloudfoundry/gosigar/sigar_darwin.go
@@ -0,0 +1,467 @@
+// Copyright (c) 2012 VMware, Inc.
+
+package sigar
+
+/*
+#include <stdlib.h>
+#include <sys/sysctl.h>
+#include <sys/mount.h>
+#include <mach/mach_init.h>
+#include <mach/mach_host.h>
+#include <mach/host_info.h>
+#include <libproc.h>
+#include <mach/processor_info.h>
+#include <mach/vm_map.h>
+*/
+import "C"
+
+import (
+	"bytes"
+	"encoding/binary"
+	"fmt"
+	"io"
+	"syscall"
+	"time"
+	"unsafe"
+)
+
+func (self *LoadAverage) Get() error {
+	avg := []C.double{0, 0, 0}
+
+	C.getloadavg(&avg[0], C.int(len(avg)))
+
+	self.One = float64(avg[0])
+	self.Five = float64(avg[1])
+	self.Fifteen = float64(avg[2])
+
+	return nil
+}
+
+func (self *Uptime) Get() error {
+	tv := syscall.Timeval32{}
+
+	if err := sysctlbyname("kern.boottime", &tv); err != nil {
+		return err
+	}
+
+	self.Length = time.Since(time.Unix(int64(tv.Sec), int64(tv.Usec)*1000)).Seconds()
+
+	return nil
+}
+
+func (self *Mem) Get() error {
+	var vmstat C.vm_statistics_data_t
+
+	if err := sysctlbyname("hw.memsize", &self.Total); err != nil {
+		return err
+	}
+
+	if err := vm_info(&vmstat); err != nil {
+		return err
+	}
+
+	kern := uint64(vmstat.inactive_count) << 12
+	self.Free = uint64(vmstat.free_count) << 12
+
+	self.Used = self.Total - self.Free
+	self.ActualFree = self.Free + kern
+	self.ActualUsed = self.Used - kern
+
+	return nil
+}
+
+type xsw_usage struct {
+	Total, Avail, Used uint64
+}
+
+func (self *Swap) Get() error {
+	sw_usage := xsw_usage{}
+
+	if err := sysctlbyname("vm.swapusage", &sw_usage); err != nil {
+		return err
+	}
+
+	self.Total = sw_usage.Total
+	self.Used = sw_usage.Used
+	self.Free = sw_usage.Avail
+
+	return nil
+}
+
+func (self *Cpu) Get() error {
+	var count C.mach_msg_type_number_t = C.HOST_CPU_LOAD_INFO_COUNT
+	var cpuload C.host_cpu_load_info_data_t
+
+	status := C.host_statistics(C.host_t(C.mach_host_self()),
+		C.HOST_CPU_LOAD_INFO,
+		C.host_info_t(unsafe.Pointer(&cpuload)),
+		&count)
+
+	if status != C.KERN_SUCCESS {
+		return fmt.Errorf("host_statistics error=%d", status)
+	}
+
+	self.User = uint64(cpuload.cpu_ticks[C.CPU_STATE_USER])
+	self.Sys = uint64(cpuload.cpu_ticks[C.CPU_STATE_SYSTEM])
+	self.Idle = uint64(cpuload.cpu_ticks[C.CPU_STATE_IDLE])
+	self.Nice = uint64(cpuload.cpu_ticks[C.CPU_STATE_NICE])
+
+	return nil
+}
+
+func (self *CpuList) Get() error {
+	var count C.mach_msg_type_number_t
+	var cpuload *C.processor_cpu_load_info_data_t
+	var ncpu C.natural_t
+
+	status := C.host_processor_info(C.host_t(C.mach_host_self()),
+		C.PROCESSOR_CPU_LOAD_INFO,
+		&ncpu,
+		(*C.processor_info_array_t)(unsafe.Pointer(&cpuload)),
+		&count)
+ + if status != C.KERN_SUCCESS { + return fmt.Errorf("host_processor_info error=%d", status) + } + + // jump through some cgo casting hoops and ensure we properly free + // the memory that cpuload points to + target := C.vm_map_t(C.mach_task_self_) + address := C.vm_address_t(uintptr(unsafe.Pointer(cpuload))) + defer C.vm_deallocate(target, address, C.vm_size_t(ncpu)) + + // the body of struct processor_cpu_load_info + // aka processor_cpu_load_info_data_t + var cpu_ticks [C.CPU_STATE_MAX]uint32 + + // copy the cpuload array to a []byte buffer + // where we can binary.Read the data + size := int(ncpu) * binary.Size(cpu_ticks) + buf := C.GoBytes(unsafe.Pointer(cpuload), C.int(size)) + + bbuf := bytes.NewBuffer(buf) + + self.List = make([]Cpu, 0, ncpu) + + for i := 0; i < int(ncpu); i++ { + cpu := Cpu{} + + err := binary.Read(bbuf, binary.LittleEndian, &cpu_ticks) + if err != nil { + return err + } + + cpu.User = uint64(cpu_ticks[C.CPU_STATE_USER]) + cpu.Sys = uint64(cpu_ticks[C.CPU_STATE_SYSTEM]) + cpu.Idle = uint64(cpu_ticks[C.CPU_STATE_IDLE]) + cpu.Nice = uint64(cpu_ticks[C.CPU_STATE_NICE]) + + self.List = append(self.List, cpu) + } + + return nil +} + +func (self *FileSystemList) Get() error { + num, err := getfsstat(nil, C.MNT_NOWAIT) + if num < 0 { + return err + } + + buf := make([]syscall.Statfs_t, num) + + num, err = getfsstat(buf, C.MNT_NOWAIT) + if err != nil { + return err + } + + fslist := make([]FileSystem, 0, num) + + for i := 0; i < num; i++ { + fs := FileSystem{} + + fs.DirName = bytePtrToString(&buf[i].Mntonname[0]) + fs.DevName = bytePtrToString(&buf[i].Mntfromname[0]) + fs.SysTypeName = bytePtrToString(&buf[i].Fstypename[0]) + + fslist = append(fslist, fs) + } + + self.List = fslist + + return err +} + +func (self *ProcList) Get() error { + n := C.proc_listpids(C.PROC_ALL_PIDS, 0, nil, 0) + if n <= 0 { + return syscall.EINVAL + } + buf := make([]byte, n) + n = C.proc_listpids(C.PROC_ALL_PIDS, 0, unsafe.Pointer(&buf[0]), n) + if n <= 0 { + return syscall.ENOMEM + } + + var pid int32 + num := int(n) / binary.Size(pid) + list := make([]int, 0, num) + bbuf := bytes.NewBuffer(buf) + + for i := 0; i < num; i++ { + if err := binary.Read(bbuf, binary.LittleEndian, &pid); err != nil { + return err + } + if pid == 0 { + continue + } + + list = append(list, int(pid)) + } + + self.List = list + + return nil +} + +func (self *ProcState) Get(pid int) error { + info := C.struct_proc_taskallinfo{} + + if err := task_info(pid, &info); err != nil { + return err + } + + self.Name = C.GoString(&info.pbsd.pbi_comm[0]) + + switch info.pbsd.pbi_status { + case C.SIDL: + self.State = RunStateIdle + case C.SRUN: + self.State = RunStateRun + case C.SSLEEP: + self.State = RunStateSleep + case C.SSTOP: + self.State = RunStateStop + case C.SZOMB: + self.State = RunStateZombie + default: + self.State = RunStateUnknown + } + + self.Ppid = int(info.pbsd.pbi_ppid) + + self.Tty = int(info.pbsd.e_tdev) + + self.Priority = int(info.ptinfo.pti_priority) + + self.Nice = int(info.pbsd.pbi_nice) + + return nil +} + +func (self *ProcMem) Get(pid int) error { + info := C.struct_proc_taskallinfo{} + + if err := task_info(pid, &info); err != nil { + return err + } + + self.Size = uint64(info.ptinfo.pti_virtual_size) + self.Resident = uint64(info.ptinfo.pti_resident_size) + self.PageFaults = uint64(info.ptinfo.pti_faults) + + return nil +} + +func (self *ProcTime) Get(pid int) error { + info := C.struct_proc_taskallinfo{} + + if err := task_info(pid, &info); err != nil { + return err + } + + self.User = + 
uint64(info.ptinfo.pti_total_user) / uint64(time.Millisecond) + + self.Sys = + uint64(info.ptinfo.pti_total_system) / uint64(time.Millisecond) + + self.Total = self.User + self.Sys + + self.StartTime = (uint64(info.pbsd.pbi_start_tvsec) * 1000) + + (uint64(info.pbsd.pbi_start_tvusec) / 1000) + + return nil +} + +func (self *ProcArgs) Get(pid int) error { + var args []string + + argv := func(arg string) { + args = append(args, arg) + } + + err := kern_procargs(pid, nil, argv, nil) + + self.List = args + + return err +} + +func (self *ProcExe) Get(pid int) error { + exe := func(arg string) { + self.Name = arg + } + + return kern_procargs(pid, exe, nil, nil) +} + +// wrapper around sysctl KERN_PROCARGS2 +// callbacks params are optional, +// up to the caller as to which pieces of data they want +func kern_procargs(pid int, + exe func(string), + argv func(string), + env func(string, string)) error { + + mib := []C.int{C.CTL_KERN, C.KERN_PROCARGS2, C.int(pid)} + argmax := uintptr(C.ARG_MAX) + buf := make([]byte, argmax) + err := sysctl(mib, &buf[0], &argmax, nil, 0) + if err != nil { + return nil + } + + bbuf := bytes.NewBuffer(buf) + bbuf.Truncate(int(argmax)) + + var argc int32 + binary.Read(bbuf, binary.LittleEndian, &argc) + + path, err := bbuf.ReadBytes(0) + if exe != nil { + exe(string(chop(path))) + } + + // skip trailing \0's + for { + c, _ := bbuf.ReadByte() + if c != 0 { + bbuf.UnreadByte() + break // start of argv[0] + } + } + + for i := 0; i < int(argc); i++ { + arg, err := bbuf.ReadBytes(0) + if err == io.EOF { + break + } + if argv != nil { + argv(string(chop(arg))) + } + } + + if env == nil { + return nil + } + + delim := []byte{61} // "=" + + for { + line, err := bbuf.ReadBytes(0) + if err == io.EOF || line[0] == 0 { + break + } + pair := bytes.SplitN(chop(line), delim, 2) + env(string(pair[0]), string(pair[1])) + } + + return nil +} + +// XXX copied from zsyscall_darwin_amd64.go +func sysctl(mib []C.int, old *byte, oldlen *uintptr, + new *byte, newlen uintptr) (err error) { + var p0 unsafe.Pointer + p0 = unsafe.Pointer(&mib[0]) + _, _, e1 := syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(p0), + uintptr(len(mib)), + uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), + uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = e1 + } + return +} + +func vm_info(vmstat *C.vm_statistics_data_t) error { + var count C.mach_msg_type_number_t = C.HOST_VM_INFO_COUNT + + status := C.host_statistics( + C.host_t(C.mach_host_self()), + C.HOST_VM_INFO, + C.host_info_t(unsafe.Pointer(vmstat)), + &count) + + if status != C.KERN_SUCCESS { + return fmt.Errorf("host_statistics=%d", status) + } + + return nil +} + +// generic Sysctl buffer unmarshalling +func sysctlbyname(name string, data interface{}) (err error) { + val, err := syscall.Sysctl(name) + if err != nil { + return err + } + + buf := []byte(val) + + switch v := data.(type) { + case *uint64: + *v = *(*uint64)(unsafe.Pointer(&buf[0])) + return + } + + bbuf := bytes.NewBuffer([]byte(val)) + return binary.Read(bbuf, binary.LittleEndian, data) +} + +// syscall.Getfsstat() wrapper is broken, roll our own to workaround. 
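// Passing a nil buffer to the raw syscall below makes the kernel report only
// the number of mounted filesystems, which FileSystemList.Get uses to size
// the real buffer for a second call.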
+func getfsstat(buf []syscall.Statfs_t, flags int) (n int, err error) { + var ptr uintptr + var size uintptr + + if len(buf) > 0 { + ptr = uintptr(unsafe.Pointer(&buf[0])) + size = unsafe.Sizeof(buf[0]) * uintptr(len(buf)) + } else { + ptr = uintptr(0) + size = uintptr(0) + } + + trap := uintptr(syscall.SYS_GETFSSTAT64) + ret, _, errno := syscall.Syscall(trap, ptr, size, uintptr(flags)) + + n = int(ret) + if errno != 0 { + err = errno + } + + return +} + +func task_info(pid int, info *C.struct_proc_taskallinfo) error { + size := C.int(unsafe.Sizeof(*info)) + ptr := unsafe.Pointer(info) + + n := C.proc_pidinfo(C.int(pid), C.PROC_PIDTASKALLINFO, 0, ptr, size) + if n != size { + return syscall.ENOMEM + } + + return nil +} diff --git a/vendor/github.com/cloudfoundry/gosigar/sigar_format.go b/vendor/github.com/cloudfoundry/gosigar/sigar_format.go new file mode 100644 index 00000000..d80a64e8 --- /dev/null +++ b/vendor/github.com/cloudfoundry/gosigar/sigar_format.go @@ -0,0 +1,126 @@ +// Copyright (c) 2012 VMware, Inc. + +package sigar + +import ( + "bufio" + "bytes" + "fmt" + "strconv" + "time" +) + +// Go version of apr_strfsize +func FormatSize(size uint64) string { + ord := []string{"K", "M", "G", "T", "P", "E"} + o := 0 + buf := new(bytes.Buffer) + w := bufio.NewWriter(buf) + + if size < 973 { + fmt.Fprintf(w, "%3d ", size) + w.Flush() + return buf.String() + } + + for { + remain := size & 1023 + size >>= 10 + + if size >= 973 { + o++ + continue + } + + if size < 9 || (size == 9 && remain < 973) { + remain = ((remain * 5) + 256) / 512 + if remain >= 10 { + size++ + remain = 0 + } + + fmt.Fprintf(w, "%d.%d%s", size, remain, ord[o]) + break + } + + if remain >= 512 { + size++ + } + + fmt.Fprintf(w, "%3d%s", size, ord[o]) + break + } + + w.Flush() + return buf.String() +} + +func FormatPercent(percent float64) string { + return strconv.FormatFloat(percent, 'f', -1, 64) + "%" +} + +func (self *FileSystemUsage) UsePercent() float64 { + b_used := (self.Total - self.Free) / 1024 + b_avail := self.Avail / 1024 + utotal := b_used + b_avail + used := b_used + + if utotal != 0 { + u100 := used * 100 + pct := u100 / utotal + if u100%utotal != 0 { + pct += 1 + } + return (float64(pct) / float64(100)) * 100.0 + } + + return 0.0 +} + +func (self *Uptime) Format() string { + buf := new(bytes.Buffer) + w := bufio.NewWriter(buf) + uptime := uint64(self.Length) + + days := uptime / (60 * 60 * 24) + + if days != 0 { + s := "" + if days > 1 { + s = "s" + } + fmt.Fprintf(w, "%d day%s, ", days, s) + } + + minutes := uptime / 60 + hours := minutes / 60 + hours %= 24 + minutes %= 60 + + fmt.Fprintf(w, "%2d:%02d", hours, minutes) + + w.Flush() + return buf.String() +} + +func (self *ProcTime) FormatStartTime() string { + if self.StartTime == 0 { + return "00:00" + } + start := time.Unix(int64(self.StartTime)/1000, 0) + format := "Jan02" + if time.Since(start).Seconds() < (60 * 60 * 24) { + format = "15:04" + } + return start.Format(format) +} + +func (self *ProcTime) FormatTotal() string { + t := self.Total / 1000 + ss := t % 60 + t /= 60 + mm := t % 60 + t /= 60 + hh := t % 24 + return fmt.Sprintf("%02d:%02d:%02d", hh, mm, ss) +} diff --git a/vendor/github.com/cloudfoundry/gosigar/sigar_interface.go b/vendor/github.com/cloudfoundry/gosigar/sigar_interface.go new file mode 100644 index 00000000..dd72a76b --- /dev/null +++ b/vendor/github.com/cloudfoundry/gosigar/sigar_interface.go @@ -0,0 +1,141 @@ +package sigar + +import ( + "time" +) + +type Sigar interface { + CollectCpuStats(collectionInterval time.Duration) 
(<-chan Cpu, chan<- struct{}) + GetLoadAverage() (LoadAverage, error) + GetMem() (Mem, error) + GetSwap() (Swap, error) + GetFileSystemUsage(string) (FileSystemUsage, error) +} + +type Cpu struct { + User uint64 + Nice uint64 + Sys uint64 + Idle uint64 + Wait uint64 + Irq uint64 + SoftIrq uint64 + Stolen uint64 +} + +func (cpu *Cpu) Total() uint64 { + return cpu.User + cpu.Nice + cpu.Sys + cpu.Idle + + cpu.Wait + cpu.Irq + cpu.SoftIrq + cpu.Stolen +} + +func (cpu Cpu) Delta(other Cpu) Cpu { + return Cpu{ + User: cpu.User - other.User, + Nice: cpu.Nice - other.Nice, + Sys: cpu.Sys - other.Sys, + Idle: cpu.Idle - other.Idle, + Wait: cpu.Wait - other.Wait, + Irq: cpu.Irq - other.Irq, + SoftIrq: cpu.SoftIrq - other.SoftIrq, + Stolen: cpu.Stolen - other.Stolen, + } +} + +type LoadAverage struct { + One, Five, Fifteen float64 +} + +type Uptime struct { + Length float64 +} + +type Mem struct { + Total uint64 + Used uint64 + Free uint64 + ActualFree uint64 + ActualUsed uint64 +} + +type Swap struct { + Total uint64 + Used uint64 + Free uint64 +} + +type CpuList struct { + List []Cpu +} + +type FileSystem struct { + DirName string + DevName string + TypeName string + SysTypeName string + Options string + Flags uint32 +} + +type FileSystemList struct { + List []FileSystem +} + +type FileSystemUsage struct { + Total uint64 + Used uint64 + Free uint64 + Avail uint64 + Files uint64 + FreeFiles uint64 +} + +type ProcList struct { + List []int +} + +type RunState byte + +const ( + RunStateSleep = 'S' + RunStateRun = 'R' + RunStateStop = 'T' + RunStateZombie = 'Z' + RunStateIdle = 'D' + RunStateUnknown = '?' +) + +type ProcState struct { + Name string + State RunState + Ppid int + Tty int + Priority int + Nice int + Processor int +} + +type ProcMem struct { + Size uint64 + Resident uint64 + Share uint64 + MinorFaults uint64 + MajorFaults uint64 + PageFaults uint64 +} + +type ProcTime struct { + StartTime uint64 + User uint64 + Sys uint64 + Total uint64 +} + +type ProcArgs struct { + List []string +} + +type ProcExe struct { + Name string + Cwd string + Root string +} diff --git a/vendor/github.com/cloudfoundry/gosigar/sigar_linux.go b/vendor/github.com/cloudfoundry/gosigar/sigar_linux.go new file mode 100644 index 00000000..68ffb0f9 --- /dev/null +++ b/vendor/github.com/cloudfoundry/gosigar/sigar_linux.go @@ -0,0 +1,386 @@ +// Copyright (c) 2012 VMware, Inc. 
+ +package sigar + +import ( + "bufio" + "bytes" + "io" + "io/ioutil" + "os" + "strconv" + "strings" + "syscall" +) + +var system struct { + ticks uint64 + btime uint64 +} + +var Procd string + +func init() { + system.ticks = 100 // C.sysconf(C._SC_CLK_TCK) + + Procd = "/proc" + + // grab system boot time + readFile(Procd+"/stat", func(line string) bool { + if strings.HasPrefix(line, "btime") { + system.btime, _ = strtoull(line[6:]) + return false // stop reading + } + return true + }) +} + +func (self *LoadAverage) Get() error { + line, err := ioutil.ReadFile(Procd + "/loadavg") + if err != nil { + return nil + } + + fields := strings.Fields(string(line)) + + self.One, _ = strconv.ParseFloat(fields[0], 64) + self.Five, _ = strconv.ParseFloat(fields[1], 64) + self.Fifteen, _ = strconv.ParseFloat(fields[2], 64) + + return nil +} + +func (self *Uptime) Get() error { + sysinfo := syscall.Sysinfo_t{} + + if err := syscall.Sysinfo(&sysinfo); err != nil { + return err + } + + self.Length = float64(sysinfo.Uptime) + + return nil +} + +func (self *Mem) Get() error { + var buffers, cached uint64 + table := map[string]*uint64{ + "MemTotal": &self.Total, + "MemFree": &self.Free, + "Buffers": &buffers, + "Cached": &cached, + } + + if err := parseMeminfo(table); err != nil { + return err + } + + self.Used = self.Total - self.Free + kern := buffers + cached + self.ActualFree = self.Free + kern + self.ActualUsed = self.Used - kern + + return nil +} + +func (self *Swap) Get() error { + table := map[string]*uint64{ + "SwapTotal": &self.Total, + "SwapFree": &self.Free, + } + + if err := parseMeminfo(table); err != nil { + return err + } + + self.Used = self.Total - self.Free + return nil +} + +func (self *Cpu) Get() error { + return readFile(Procd+"/stat", func(line string) bool { + if len(line) > 4 && line[0:4] == "cpu " { + parseCpuStat(self, line) + return false + } + return true + + }) +} + +func (self *CpuList) Get() error { + capacity := len(self.List) + if capacity == 0 { + capacity = 4 + } + list := make([]Cpu, 0, capacity) + + err := readFile(Procd+"/stat", func(line string) bool { + if len(line) > 3 && line[0:3] == "cpu" && line[3] != ' ' { + cpu := Cpu{} + parseCpuStat(&cpu, line) + list = append(list, cpu) + } + return true + }) + + self.List = list + + return err +} + +func (self *FileSystemList) Get() error { + capacity := len(self.List) + if capacity == 0 { + capacity = 10 + } + fslist := make([]FileSystem, 0, capacity) + + err := readFile("/etc/mtab", func(line string) bool { + fields := strings.Fields(line) + + fs := FileSystem{} + fs.DevName = fields[0] + fs.DirName = fields[1] + fs.SysTypeName = fields[2] + fs.Options = fields[3] + + fslist = append(fslist, fs) + + return true + }) + + self.List = fslist + + return err +} + +func (self *ProcList) Get() error { + dir, err := os.Open(Procd) + if err != nil { + return err + } + defer dir.Close() + + const readAllDirnames = -1 // see os.File.Readdirnames doc + + names, err := dir.Readdirnames(readAllDirnames) + if err != nil { + return err + } + + capacity := len(names) + list := make([]int, 0, capacity) + + for _, name := range names { + if name[0] < '0' || name[0] > '9' { + continue + } + pid, err := strconv.Atoi(name) + if err == nil { + list = append(list, pid) + } + } + + self.List = list + + return nil +} + +func (self *ProcState) Get(pid int) error { + contents, err := readProcFile(pid, "stat") + if err != nil { + return err + } + + fields := strings.Fields(string(contents)) + + self.Name = fields[1][1 : len(fields[1])-1] // strip ()'s 
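	// The indices below are zero-based positions in the whitespace-split
	// stat line (proc(5) numbers the same fields from 1): run state, ppid,
	// controlling tty, priority, nice, and the CPU the task last ran on.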
+ + self.State = RunState(fields[2][0]) + + self.Ppid, _ = strconv.Atoi(fields[3]) + + self.Tty, _ = strconv.Atoi(fields[6]) + + self.Priority, _ = strconv.Atoi(fields[17]) + + self.Nice, _ = strconv.Atoi(fields[18]) + + self.Processor, _ = strconv.Atoi(fields[38]) + + return nil +} + +func (self *ProcMem) Get(pid int) error { + contents, err := readProcFile(pid, "statm") + if err != nil { + return err + } + + fields := strings.Fields(string(contents)) + + size, _ := strtoull(fields[0]) + self.Size = size << 12 + + rss, _ := strtoull(fields[1]) + self.Resident = rss << 12 + + share, _ := strtoull(fields[2]) + self.Share = share << 12 + + contents, err = readProcFile(pid, "stat") + if err != nil { + return err + } + + fields = strings.Fields(string(contents)) + + self.MinorFaults, _ = strtoull(fields[10]) + self.MajorFaults, _ = strtoull(fields[12]) + self.PageFaults = self.MinorFaults + self.MajorFaults + + return nil +} + +func (self *ProcTime) Get(pid int) error { + contents, err := readProcFile(pid, "stat") + if err != nil { + return err + } + + fields := strings.Fields(string(contents)) + + user, _ := strtoull(fields[13]) + sys, _ := strtoull(fields[14]) + // convert to millis + self.User = user * (1000 / system.ticks) + self.Sys = sys * (1000 / system.ticks) + self.Total = self.User + self.Sys + + // convert to millis + self.StartTime, _ = strtoull(fields[21]) + self.StartTime /= system.ticks + self.StartTime += system.btime + self.StartTime *= 1000 + + return nil +} + +func (self *ProcArgs) Get(pid int) error { + contents, err := readProcFile(pid, "cmdline") + if err != nil { + return err + } + + bbuf := bytes.NewBuffer(contents) + + var args []string + + for { + arg, err := bbuf.ReadBytes(0) + if err == io.EOF { + break + } + args = append(args, string(chop(arg))) + } + + self.List = args + + return nil +} + +func (self *ProcExe) Get(pid int) error { + fields := map[string]*string{ + "exe": &self.Name, + "cwd": &self.Cwd, + "root": &self.Root, + } + + for name, field := range fields { + val, err := os.Readlink(procFileName(pid, name)) + + if err != nil { + return err + } + + *field = val + } + + return nil +} + +func parseMeminfo(table map[string]*uint64) error { + return readFile(Procd+"/meminfo", func(line string) bool { + fields := strings.Split(line, ":") + + if ptr := table[fields[0]]; ptr != nil { + num := strings.TrimLeft(fields[1], " ") + val, err := strtoull(strings.Fields(num)[0]) + if err == nil { + *ptr = val * 1024 + } + } + + return true + }) +} + +func parseCpuStat(self *Cpu, line string) error { + fields := strings.Fields(line) + + self.User, _ = strtoull(fields[1]) + self.Nice, _ = strtoull(fields[2]) + self.Sys, _ = strtoull(fields[3]) + self.Idle, _ = strtoull(fields[4]) + self.Wait, _ = strtoull(fields[5]) + self.Irq, _ = strtoull(fields[6]) + self.SoftIrq, _ = strtoull(fields[7]) + self.Stolen, _ = strtoull(fields[8]) + + return nil +} + +func readFile(file string, handler func(string) bool) error { + contents, err := ioutil.ReadFile(file) + if err != nil { + return err + } + + reader := bufio.NewReader(bytes.NewBuffer(contents)) + + for { + line, _, err := reader.ReadLine() + if err == io.EOF { + break + } + if !handler(string(line)) { + break + } + } + + return nil +} + +func strtoull(val string) (uint64, error) { + return strconv.ParseUint(val, 10, 64) +} + +func procFileName(pid int, name string) string { + return Procd + "/" + strconv.Itoa(pid) + "/" + name +} + +func readProcFile(pid int, name string) ([]byte, error) { + path := procFileName(pid, name) + 
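	// A pid taken from ProcList can exit before its files are read; the
	// ENOENT-to-ESRCH translation below turns that race into the
	// conventional "no such process" error.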
contents, err := ioutil.ReadFile(path) + + if err != nil { + if perr, ok := err.(*os.PathError); ok { + if perr.Err == syscall.ENOENT { + return nil, syscall.ESRCH + } + } + } + + return contents, err +} diff --git a/vendor/github.com/cloudfoundry/gosigar/sigar_unix.go b/vendor/github.com/cloudfoundry/gosigar/sigar_unix.go new file mode 100644 index 00000000..39f18784 --- /dev/null +++ b/vendor/github.com/cloudfoundry/gosigar/sigar_unix.go @@ -0,0 +1,26 @@ +// Copyright (c) 2012 VMware, Inc. + +// +build darwin freebsd linux netbsd openbsd + +package sigar + +import "syscall" + +func (self *FileSystemUsage) Get(path string) error { + stat := syscall.Statfs_t{} + err := syscall.Statfs(path, &stat) + if err != nil { + return err + } + + bsize := stat.Bsize / 512 + + self.Total = (uint64(stat.Blocks) * uint64(bsize)) >> 1 + self.Free = (uint64(stat.Bfree) * uint64(bsize)) >> 1 + self.Avail = (uint64(stat.Bavail) * uint64(bsize)) >> 1 + self.Used = self.Total - self.Free + self.Files = stat.Files + self.FreeFiles = stat.Ffree + + return nil +} diff --git a/vendor/github.com/cloudfoundry/gosigar/sigar_util.go b/vendor/github.com/cloudfoundry/gosigar/sigar_util.go new file mode 100644 index 00000000..a02df941 --- /dev/null +++ b/vendor/github.com/cloudfoundry/gosigar/sigar_util.go @@ -0,0 +1,22 @@ +// Copyright (c) 2012 VMware, Inc. + +package sigar + +import ( + "unsafe" +) + +func bytePtrToString(ptr *int8) string { + bytes := (*[10000]byte)(unsafe.Pointer(ptr)) + + n := 0 + for bytes[n] != 0 { + n++ + } + + return string(bytes[0:n]) +} + +func chop(buf []byte) []byte { + return buf[0 : len(buf)-1] +} diff --git a/vendor/github.com/cloudfoundry/gosigar/sigar_windows.go b/vendor/github.com/cloudfoundry/gosigar/sigar_windows.go new file mode 100644 index 00000000..0c779d7c --- /dev/null +++ b/vendor/github.com/cloudfoundry/gosigar/sigar_windows.go @@ -0,0 +1,100 @@ +// Copyright (c) 2012 VMware, Inc. 
+
+package sigar
+
+// #include <stdlib.h>
+// #include <windows.h>
+import "C"
+
+import (
+	"fmt"
+	"unsafe"
+)
+
+func init() {
+}
+
+func (self *LoadAverage) Get() error {
+	return nil
+}
+
+func (self *Uptime) Get() error {
+	return nil
+}
+
+func (self *Mem) Get() error {
+	var statex C.MEMORYSTATUSEX
+	statex.dwLength = C.DWORD(unsafe.Sizeof(statex))
+
+	succeeded := C.GlobalMemoryStatusEx(&statex)
+	if succeeded == C.FALSE {
+		lastError := C.GetLastError()
+		return fmt.Errorf("GlobalMemoryStatusEx failed with error: %d", int(lastError))
+	}
+
+	self.Total = uint64(statex.ullTotalPhys)
+	return nil
+}
+
+func (self *Swap) Get() error {
+	return notImplemented()
+}
+
+func (self *Cpu) Get() error {
+	return notImplemented()
+}
+
+func (self *CpuList) Get() error {
+	return notImplemented()
+}
+
+func (self *FileSystemList) Get() error {
+	return notImplemented()
+}
+
+func (self *ProcList) Get() error {
+	return notImplemented()
+}
+
+func (self *ProcState) Get(pid int) error {
+	return notImplemented()
+}
+
+func (self *ProcMem) Get(pid int) error {
+	return notImplemented()
+}
+
+func (self *ProcTime) Get(pid int) error {
+	return notImplemented()
+}
+
+func (self *ProcArgs) Get(pid int) error {
+	return notImplemented()
+}
+
+func (self *ProcExe) Get(pid int) error {
+	return notImplemented()
+}
+
+func (self *FileSystemUsage) Get(path string) error {
+	var availableBytes C.ULARGE_INTEGER
+	var totalBytes C.ULARGE_INTEGER
+	var totalFreeBytes C.ULARGE_INTEGER
+
+	pathChars := C.CString(path)
+	defer C.free(unsafe.Pointer(pathChars))
+
+	succeeded := C.GetDiskFreeSpaceEx((*C.CHAR)(pathChars), &availableBytes, &totalBytes, &totalFreeBytes)
+	if succeeded == C.FALSE {
+		lastError := C.GetLastError()
+		return fmt.Errorf("GetDiskFreeSpaceEx failed with error: %d", int(lastError))
+	}
+
+	self.Total = *(*uint64)(unsafe.Pointer(&totalBytes))
+	return nil
+}
+
+func notImplemented() error {
+	panic("Not Implemented")
+	return nil
+}
diff --git a/vendor/github.com/coreos/go-systemd/.travis.yml b/vendor/github.com/coreos/go-systemd/.travis.yml
new file mode 100644
index 00000000..3c37292e
--- /dev/null
+++ b/vendor/github.com/coreos/go-systemd/.travis.yml
@@ -0,0 +1,8 @@
+language: go
+go: 1.4
+
+install:
+  - go get github.com/godbus/dbus
+
+script:
+  - ./test
diff --git a/vendor/github.com/coreos/go-systemd/LICENSE b/vendor/github.com/coreos/go-systemd/LICENSE
new file mode 100644
index 00000000..37ec93a1
--- /dev/null
+++ b/vendor/github.com/coreos/go-systemd/LICENSE
@@ -0,0 +1,191 @@
+Apache License
+Version 2.0, January 2004
+http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+"License" shall mean the terms and conditions for use, reproduction, and
+distribution as defined by Sections 1 through 9 of this document.
+
+"Licensor" shall mean the copyright owner or entity authorized by the copyright
+owner that is granting the License.
+
+"Legal Entity" shall mean the union of the acting entity and all other entities
+that control, are controlled by, or are under common control with that entity.
+For the purposes of this definition, "control" means (i) the power, direct or
+indirect, to cause the direction or management of such entity, whether by
+contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
+outstanding shares, or (iii) beneficial ownership of such entity.
+
+"You" (or "Your") shall mean an individual or Legal Entity exercising
+permissions granted by this License.
+ +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. 
Redistribution. + +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. 
+ +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/coreos/go-systemd/README.md b/vendor/github.com/coreos/go-systemd/README.md new file mode 100644 index 00000000..cb87a112 --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/README.md @@ -0,0 +1,54 @@ +# go-systemd + +[![Build Status](https://travis-ci.org/coreos/go-systemd.png?branch=master)](https://travis-ci.org/coreos/go-systemd) +[![godoc](https://godoc.org/github.com/coreos/go-systemd?status.svg)](http://godoc.org/github.com/coreos/go-systemd) + +Go bindings to systemd. 
The project has several packages:
+
+- `activation` - for writing and using socket activation from Go
+- `dbus` - for starting/stopping/inspecting running services and units
+- `journal` - for writing to systemd's logging service, journald
+- `sdjournal` - for reading from journald by wrapping its C API
+- `machine1` - for registering machines/containers with systemd
+- `unit` - for (de)serialization and comparison of unit files
+
+## Socket Activation
+
+An example HTTP server using socket activation can be quickly set up by following this README on a Linux machine running systemd:
+
+https://github.com/coreos/go-systemd/tree/master/examples/activation/httpserver
+
+## Journal
+
+Using the pure-Go `journal` package you can submit journal entries directly to systemd's journal, taking advantage of features like indexed key/value pairs for each log entry.
+The `sdjournal` package provides read access to the journal by wrapping around journald's native C API; consequently it requires cgo and the journal headers to be available.
+
+## D-Bus
+
+The `dbus` package connects to the [systemd D-Bus API](http://www.freedesktop.org/wiki/Software/systemd/dbus/) and lets you start, stop and introspect systemd units. The API docs are here:
+
+http://godoc.org/github.com/coreos/go-systemd/dbus
+
+### Debugging
+
+Create `/etc/dbus-1/system-local.conf` that looks like this:
+
+```
+<!DOCTYPE busconfig PUBLIC "-//freedesktop//DTD D-BUS Bus Configuration 1.0//EN"
+"http://www.freedesktop.org/standards/dbus/1.0/busconfig.dtd">
+<busconfig>
+    <policy user="root">
+        <allow eavesdrop="true"/>
+        <allow eavesdrop="true" send_destination="*"/>
+    </policy>
+</busconfig>
+```
+
+## machined
+
+The `machine1` package allows interaction with the [systemd machined D-Bus API](http://www.freedesktop.org/wiki/Software/systemd/machined/).
+
+## Units
+
+The `unit` package provides various functions for working with [systemd unit files](http://www.freedesktop.org/software/systemd/man/systemd.unit.html).
diff --git a/vendor/github.com/coreos/go-systemd/activation/files.go b/vendor/github.com/coreos/go-systemd/activation/files.go
new file mode 100644
index 00000000..c8e85fcd
--- /dev/null
+++ b/vendor/github.com/coreos/go-systemd/activation/files.go
@@ -0,0 +1,52 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package activation implements primitives for systemd socket activation.
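// A minimal consumer sketch (editor's example, not part of this file): a
// daemon started via a systemd .socket unit could serve HTTP on the first
// activated socket like so, assuming exactly one socket is passed:
//
//	listeners, err := activation.Listeners(true)
//	if err != nil || len(listeners) == 0 || listeners[0] == nil {
//		log.Fatal("no activated sockets")
//	}
//	http.Serve(listeners[0], nil)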
+package activation
+
+import (
+	"os"
+	"strconv"
+	"syscall"
+)
+
+// based on: https://gist.github.com/alberts/4640792
+const (
+	listenFdsStart = 3
+)
+
+// Files returns the files backing the descriptors passed in by systemd during
+// socket activation (numbered from 3 upwards), or nil if the
+// LISTEN_PID/LISTEN_FDS environment does not match this process.
+func Files(unsetEnv bool) []*os.File {
+	if unsetEnv {
+		defer os.Unsetenv("LISTEN_PID")
+		defer os.Unsetenv("LISTEN_FDS")
+	}
+
+	pid, err := strconv.Atoi(os.Getenv("LISTEN_PID"))
+	if err != nil || pid != os.Getpid() {
+		return nil
+	}
+
+	nfds, err := strconv.Atoi(os.Getenv("LISTEN_FDS"))
+	if err != nil || nfds == 0 {
+		return nil
+	}
+
+	files := make([]*os.File, 0, nfds)
+	for fd := listenFdsStart; fd < listenFdsStart+nfds; fd++ {
+		syscall.CloseOnExec(fd)
+		files = append(files, os.NewFile(uintptr(fd), "LISTEN_FD_"+strconv.Itoa(fd)))
+	}
+
+	return files
+}
diff --git a/vendor/github.com/coreos/go-systemd/activation/listeners.go b/vendor/github.com/coreos/go-systemd/activation/listeners.go
new file mode 100644
index 00000000..df27c29e
--- /dev/null
+++ b/vendor/github.com/coreos/go-systemd/activation/listeners.go
@@ -0,0 +1,62 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package activation
+
+import (
+	"crypto/tls"
+	"net"
+)
+
+// Listeners returns a slice containing a net.Listener for each matching socket type
+// passed to this process.
+//
+// The order of the file descriptors is preserved in the returned slice.
+// Nil values are used to fill any gaps. For example, if systemd were to return file descriptors
+// corresponding with "udp, tcp, tcp", then the slice would contain {nil, net.Listener, net.Listener}.
+func Listeners(unsetEnv bool) ([]net.Listener, error) {
+	files := Files(unsetEnv)
+	listeners := make([]net.Listener, len(files))
+
+	for i, f := range files {
+		if l, err := net.FileListener(f); err == nil {
+			listeners[i] = l
+		}
+	}
+	return listeners, nil
+}
+
+// TLSListeners returns a slice containing a net.Listener for each matching TCP socket type
+// passed to this process.
+// It uses the default Listeners func and forces the TCP listeners to use TLS based on tlsConfig.
+func TLSListeners(unsetEnv bool, tlsConfig *tls.Config) ([]net.Listener, error) {
+	listeners, err := Listeners(unsetEnv)
+
+	if listeners == nil || err != nil {
+		return nil, err
+	}
+
+	// err is known to be nil past the check above.
+	if tlsConfig != nil {
+		tlsConfig.NextProtos = []string{"http/1.1"}
+
+		for i, l := range listeners {
+			// Activate TLS only for TCP sockets
+			if l.Addr().Network() == "tcp" {
+				listeners[i] = tls.NewListener(l, tlsConfig)
+			}
+		}
+	}
+
+	return listeners, err
+}
diff --git a/vendor/github.com/coreos/go-systemd/activation/packetconns.go b/vendor/github.com/coreos/go-systemd/activation/packetconns.go
new file mode 100644
index 00000000..48b2ca02
--- /dev/null
+++ b/vendor/github.com/coreos/go-systemd/activation/packetconns.go
@@ -0,0 +1,37 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package activation + +import ( + "net" +) + +// PacketConns returns a slice containing a net.PacketConn for each matching socket type +// passed to this process. +// +// The order of the file descriptors is preserved in the returned slice. +// Nil values are used to fill any gaps. For example if systemd were to return file descriptors +// corresponding with "udp, tcp, udp", then the slice would contain {net.PacketConn, nil, net.PacketConn} +func PacketConns(unsetEnv bool) ([]net.PacketConn, error) { + files := Files(unsetEnv) + conns := make([]net.PacketConn, len(files)) + + for i, f := range files { + if pc, err := net.FilePacketConn(f); err == nil { + conns[i] = pc + } + } + return conns, nil +} diff --git a/vendor/github.com/coreos/go-systemd/daemon/sdnotify.go b/vendor/github.com/coreos/go-systemd/daemon/sdnotify.go new file mode 100644 index 00000000..b92b1911 --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/daemon/sdnotify.go @@ -0,0 +1,31 @@ +// Code forked from Docker project +package daemon + +import ( + "errors" + "net" + "os" +) + +var SdNotifyNoSocket = errors.New("No socket") + +// SdNotify sends a message to the init daemon. It is common to ignore the error. +func SdNotify(state string) error { + socketAddr := &net.UnixAddr{ + Name: os.Getenv("NOTIFY_SOCKET"), + Net: "unixgram", + } + + if socketAddr.Name == "" { + return SdNotifyNoSocket + } + + conn, err := net.DialUnix(socketAddr.Net, nil, socketAddr) + if err != nil { + return err + } + defer conn.Close() + + _, err = conn.Write([]byte(state)) + return err +} diff --git a/vendor/github.com/coreos/go-systemd/dbus/dbus.go b/vendor/github.com/coreos/go-systemd/dbus/dbus.go new file mode 100644 index 00000000..5dd748e6 --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/dbus/dbus.go @@ -0,0 +1,187 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Integration with the systemd D-Bus API. 
See http://www.freedesktop.org/wiki/Software/systemd/dbus/ +package dbus + +import ( + "fmt" + "os" + "strconv" + "strings" + "sync" + + "github.com/godbus/dbus" +) + +const ( + alpha = `abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ` + num = `0123456789` + alphanum = alpha + num + signalBuffer = 100 +) + +// needsEscape checks whether a byte in a potential dbus ObjectPath needs to be escaped +func needsEscape(i int, b byte) bool { + // Escape everything that is not a-z-A-Z-0-9 + // Also escape 0-9 if it's the first character + return strings.IndexByte(alphanum, b) == -1 || + (i == 0 && strings.IndexByte(num, b) != -1) +} + +// PathBusEscape sanitizes a constituent string of a dbus ObjectPath using the +// rules that systemd uses for serializing special characters. +func PathBusEscape(path string) string { + // Special case the empty string + if len(path) == 0 { + return "_" + } + n := []byte{} + for i := 0; i < len(path); i++ { + c := path[i] + if needsEscape(i, c) { + e := fmt.Sprintf("_%x", c) + n = append(n, []byte(e)...) + } else { + n = append(n, c) + } + } + return string(n) +} + +// Conn is a connection to systemd's dbus endpoint. +type Conn struct { + // sysconn/sysobj are only used to call dbus methods + sysconn *dbus.Conn + sysobj dbus.BusObject + + // sigconn/sigobj are only used to receive dbus signals + sigconn *dbus.Conn + sigobj dbus.BusObject + + jobListener struct { + jobs map[dbus.ObjectPath]chan<- string + sync.Mutex + } + subscriber struct { + updateCh chan<- *SubStateUpdate + errCh chan<- error + sync.Mutex + ignore map[dbus.ObjectPath]int64 + cleanIgnore int64 + } +} + +// New establishes a connection to the system bus and authenticates. +// Callers should call Close() when done with the connection. +func New() (*Conn, error) { + return newConnection(func() (*dbus.Conn, error) { + return dbusAuthHelloConnection(dbus.SystemBusPrivate) + }) +} + +// NewUserConnection establishes a connection to the session bus and +// authenticates. This can be used to connect to systemd user instances. +// Callers should call Close() when done with the connection. +func NewUserConnection() (*Conn, error) { + return newConnection(func() (*dbus.Conn, error) { + return dbusAuthHelloConnection(dbus.SessionBusPrivate) + }) +} + +// NewSystemdConnection establishes a private, direct connection to systemd. +// This can be used for communicating with systemd without a dbus daemon. +// Callers should call Close() when done with the connection. +func NewSystemdConnection() (*Conn, error) { + return newConnection(func() (*dbus.Conn, error) { + // We skip Hello when talking directly to systemd. 
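+		// (Hello() is a message-bus method; /run/systemd/private is a
+		// direct peer connection with no bus daemon on the other end,
+		// so the handshake would fail there.)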
+ return dbusAuthConnection(func() (*dbus.Conn, error) { + return dbus.Dial("unix:path=/run/systemd/private") + }) + }) +} + +// Close closes an established connection +func (c *Conn) Close() { + c.sysconn.Close() + c.sigconn.Close() +} + +func newConnection(createBus func() (*dbus.Conn, error)) (*Conn, error) { + sysconn, err := createBus() + if err != nil { + return nil, err + } + + sigconn, err := createBus() + if err != nil { + sysconn.Close() + return nil, err + } + + c := &Conn{ + sysconn: sysconn, + sysobj: systemdObject(sysconn), + sigconn: sigconn, + sigobj: systemdObject(sigconn), + } + + c.subscriber.ignore = make(map[dbus.ObjectPath]int64) + c.jobListener.jobs = make(map[dbus.ObjectPath]chan<- string) + + // Setup the listeners on jobs so that we can get completions + c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0, + "type='signal', interface='org.freedesktop.systemd1.Manager', member='JobRemoved'") + + c.dispatch() + return c, nil +} + +func dbusAuthConnection(createBus func() (*dbus.Conn, error)) (*dbus.Conn, error) { + conn, err := createBus() + if err != nil { + return nil, err + } + + // Only use EXTERNAL method, and hardcode the uid (not username) + // to avoid a username lookup (which requires a dynamically linked + // libc) + methods := []dbus.Auth{dbus.AuthExternal(strconv.Itoa(os.Getuid()))} + + err = conn.Auth(methods) + if err != nil { + conn.Close() + return nil, err + } + + return conn, nil +} + +func dbusAuthHelloConnection(createBus func() (*dbus.Conn, error)) (*dbus.Conn, error) { + conn, err := dbusAuthConnection(createBus) + if err != nil { + return nil, err + } + + if err = conn.Hello(); err != nil { + conn.Close() + return nil, err + } + + return conn, nil +} + +func systemdObject(conn *dbus.Conn) dbus.BusObject { + return conn.Object("org.freedesktop.systemd1", dbus.ObjectPath("/org/freedesktop/systemd1")) +} diff --git a/vendor/github.com/coreos/go-systemd/dbus/methods.go b/vendor/github.com/coreos/go-systemd/dbus/methods.go new file mode 100644 index 00000000..ab614c7c --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/dbus/methods.go @@ -0,0 +1,410 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package dbus
+
+import (
+	"errors"
+	"path"
+	"strconv"
+
+	"github.com/godbus/dbus"
+)
+
+func (c *Conn) jobComplete(signal *dbus.Signal) {
+	var id uint32
+	var job dbus.ObjectPath
+	var unit string
+	var result string
+	dbus.Store(signal.Body, &id, &job, &unit, &result)
+	c.jobListener.Lock()
+	out, ok := c.jobListener.jobs[job]
+	if ok {
+		out <- result
+		delete(c.jobListener.jobs, job)
+	}
+	c.jobListener.Unlock()
+}
+
+func (c *Conn) startJob(ch chan<- string, job string, args ...interface{}) (int, error) {
+	if ch != nil {
+		c.jobListener.Lock()
+		defer c.jobListener.Unlock()
+	}
+
+	var p dbus.ObjectPath
+	err := c.sysobj.Call(job, 0, args...).Store(&p)
+	if err != nil {
+		return 0, err
+	}
+
+	if ch != nil {
+		c.jobListener.jobs[p] = ch
+	}
+
+	// ignore error since 0 is fine if conversion fails
+	jobID, _ := strconv.Atoi(path.Base(string(p)))
+
+	return jobID, nil
+}
+
+// StartUnit enqueues a start job and depending jobs, if any (unless otherwise
+// specified by the mode string).
+//
+// Takes the unit to activate, plus a mode string. The mode needs to be one of
+// replace, fail, isolate, ignore-dependencies, ignore-requirements. If
+// "replace" the call will start the unit and its dependencies, possibly
+// replacing already queued jobs that conflict with this. If "fail" the call
+// will start the unit and its dependencies, but will fail if this would change
+// an already queued job. If "isolate" the call will start the unit in question
+// and terminate all units that aren't dependencies of it. If
+// "ignore-dependencies" it will start a unit but ignore all its dependencies.
+// If "ignore-requirements" it will start a unit but only ignore the
+// requirement dependencies. It is not recommended to make use of the latter
+// two options.
+//
+// If the provided channel is non-nil, a result string will be sent to it upon
+// job completion: one of done, canceled, timeout, failed, dependency, skipped.
+// done indicates successful execution of a job. canceled indicates that a job
+// has been canceled before it finished execution. timeout indicates that the
+// job timeout was reached. failed indicates that the job failed. dependency
+// indicates that a job this job depended on failed, causing this job to be
+// removed as well. skipped indicates that a job was skipped because it
+// didn't apply to the unit's current state.
+//
+// If no error occurs, the ID of the underlying systemd job will be returned.
+// A job ID of 0 may still be returned without an error when the ID cannot be
+// parsed from the job's object path; in that case the actual underlying ID is
+// not 0, so a returned 0 should not be considered authoritative.
+//
+// If an error does occur, it will be returned to the user alongside a job ID of 0.
+func (c *Conn) StartUnit(name string, mode string, ch chan<- string) (int, error) {
+	return c.startJob(ch, "org.freedesktop.systemd1.Manager.StartUnit", name, mode)
+}
+
+// StopUnit is similar to StartUnit, but stops the specified unit rather
+// than starting it.
+func (c *Conn) StopUnit(name string, mode string, ch chan<- string) (int, error) {
+	return c.startJob(ch, "org.freedesktop.systemd1.Manager.StopUnit", name, mode)
+}
+
+// ReloadUnit reloads a unit. Reloading is done only if the unit is already running, and fails otherwise.
+func (c *Conn) ReloadUnit(name string, mode string, ch chan<- string) (int, error) {
+	return c.startJob(ch, "org.freedesktop.systemd1.Manager.ReloadUnit", name, mode)
+}
+
+// RestartUnit restarts a service. If the service isn't running, it will be
+// started.
+func (c *Conn) RestartUnit(name string, mode string, ch chan<- string) (int, error) {
+	return c.startJob(ch, "org.freedesktop.systemd1.Manager.RestartUnit", name, mode)
+}
+
+// TryRestartUnit is like RestartUnit, except that a service that isn't running
+// is not affected by the restart.
+func (c *Conn) TryRestartUnit(name string, mode string, ch chan<- string) (int, error) {
+	return c.startJob(ch, "org.freedesktop.systemd1.Manager.TryRestartUnit", name, mode)
+}
+
+// ReloadOrRestartUnit attempts a reload if the unit supports it and uses a
+// restart otherwise.
+func (c *Conn) ReloadOrRestartUnit(name string, mode string, ch chan<- string) (int, error) {
+	return c.startJob(ch, "org.freedesktop.systemd1.Manager.ReloadOrRestartUnit", name, mode)
+}
+
+// ReloadOrTryRestartUnit attempts a reload if the unit supports it and uses a
+// "Try"-flavored restart otherwise.
+func (c *Conn) ReloadOrTryRestartUnit(name string, mode string, ch chan<- string) (int, error) {
+	return c.startJob(ch, "org.freedesktop.systemd1.Manager.ReloadOrTryRestartUnit", name, mode)
+}
+
+// StartTransientUnit() may be used to create and start a transient unit, which
+// will be released as soon as it is not running or referenced anymore or the
+// system is rebooted. name is the unit name including suffix, and must be
+// unique. mode is the same as in StartUnit(), properties contains properties
+// of the unit.
+func (c *Conn) StartTransientUnit(name string, mode string, properties []Property, ch chan<- string) (int, error) {
+	return c.startJob(ch, "org.freedesktop.systemd1.Manager.StartTransientUnit", name, mode, properties, make([]PropertyCollection, 0))
+}
+
+// KillUnit takes the unit name and a UNIX signal number to send. All of the unit's
+// processes are killed.
+func (c *Conn) KillUnit(name string, signal int32) {
+	c.sysobj.Call("org.freedesktop.systemd1.Manager.KillUnit", 0, name, "all", signal).Store()
+}
+
+// ResetFailedUnit resets the "failed" state of a specific unit.
+func (c *Conn) ResetFailedUnit(name string) error {
+	return c.sysobj.Call("org.freedesktop.systemd1.Manager.ResetFailedUnit", 0, name).Store()
+}
+
+// getProperties takes the unit name and returns all of its dbus object properties, for the given dbus interface
+func (c *Conn) getProperties(unit string, dbusInterface string) (map[string]interface{}, error) {
+	var err error
+	var props map[string]dbus.Variant
+
+	path := unitPath(unit)
+	if !path.IsValid() {
+		return nil, errors.New("invalid unit name: " + unit)
+	}
+
+	obj := c.sysconn.Object("org.freedesktop.systemd1", path)
+	err = obj.Call("org.freedesktop.DBus.Properties.GetAll", 0, dbusInterface).Store(&props)
+	if err != nil {
+		return nil, err
+	}
+
+	out := make(map[string]interface{}, len(props))
+	for k, v := range props {
+		out[k] = v.Value()
+	}
+
+	return out, nil
+}
+
+// GetUnitProperties takes the unit name and returns all of its dbus object properties.
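A sketch tying together the calls above: start a unit, wait for the queued job's result, then inspect the unit's properties (the unit name is illustrative; assumes a systemd host and permission to manage units):

```
package main

import (
	"fmt"
	"log"

	"github.com/coreos/go-systemd/dbus"
)

func main() {
	conn, err := dbus.New() // system bus connection, as set up in dbus.go above
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	ch := make(chan string, 1) // buffered so the JobRemoved handler never blocks
	if _, err := conn.StartUnit("example.service", "replace", ch); err != nil {
		log.Fatal(err)
	}
	// One of: done, canceled, timeout, failed, dependency, skipped.
	fmt.Println("job result:", <-ch)

	props, err := conn.GetUnitProperties("example.service")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("ActiveState:", props["ActiveState"])
}
```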
+func (c *Conn) GetUnitProperties(unit string) (map[string]interface{}, error) { + return c.getProperties(unit, "org.freedesktop.systemd1.Unit") +} + +func (c *Conn) getProperty(unit string, dbusInterface string, propertyName string) (*Property, error) { + var err error + var prop dbus.Variant + + path := unitPath(unit) + if !path.IsValid() { + return nil, errors.New("invalid unit name: " + unit) + } + + obj := c.sysconn.Object("org.freedesktop.systemd1", path) + err = obj.Call("org.freedesktop.DBus.Properties.Get", 0, dbusInterface, propertyName).Store(&prop) + if err != nil { + return nil, err + } + + return &Property{Name: propertyName, Value: prop}, nil +} + +func (c *Conn) GetUnitProperty(unit string, propertyName string) (*Property, error) { + return c.getProperty(unit, "org.freedesktop.systemd1.Unit", propertyName) +} + +// GetUnitTypeProperties returns the extra properties for a unit, specific to the unit type. +// Valid values for unitType: Service, Socket, Target, Device, Mount, Automount, Snapshot, Timer, Swap, Path, Slice, Scope +// return "dbus.Error: Unknown interface" if the unitType is not the correct type of the unit +func (c *Conn) GetUnitTypeProperties(unit string, unitType string) (map[string]interface{}, error) { + return c.getProperties(unit, "org.freedesktop.systemd1."+unitType) +} + +// SetUnitProperties() may be used to modify certain unit properties at runtime. +// Not all properties may be changed at runtime, but many resource management +// settings (primarily those in systemd.cgroup(5)) may. The changes are applied +// instantly, and stored on disk for future boots, unless runtime is true, in which +// case the settings only apply until the next reboot. name is the name of the unit +// to modify. properties are the settings to set, encoded as an array of property +// name and value pairs. +func (c *Conn) SetUnitProperties(name string, runtime bool, properties ...Property) error { + return c.sysobj.Call("org.freedesktop.systemd1.Manager.SetUnitProperties", 0, name, runtime, properties).Store() +} + +func (c *Conn) GetUnitTypeProperty(unit string, unitType string, propertyName string) (*Property, error) { + return c.getProperty(unit, "org.freedesktop.systemd1."+unitType, propertyName) +} + +// ListUnits returns an array with all currently loaded units. Note that +// units may be known by multiple names at the same time, and hence there might +// be more unit names loaded than actual units behind them. +func (c *Conn) ListUnits() ([]UnitStatus, error) { + result := make([][]interface{}, 0) + err := c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnits", 0).Store(&result) + if err != nil { + return nil, err + } + + resultInterface := make([]interface{}, len(result)) + for i := range result { + resultInterface[i] = result[i] + } + + status := make([]UnitStatus, len(result)) + statusInterface := make([]interface{}, len(status)) + for i := range status { + statusInterface[i] = &status[i] + } + + err = dbus.Store(resultInterface, statusInterface...) + if err != nil { + return nil, err + } + + return status, nil +} + +type UnitStatus struct { + Name string // The primary unit name as string + Description string // The human readable description string + LoadState string // The load state (i.e. whether the unit file has been loaded successfully) + ActiveState string // The active state (i.e. 
whether the unit is currently started or not)
+	SubState    string          // The sub state (a more fine-grained version of the active state that is specific to the unit type, which the active state is not)
+	Followed    string          // A unit that is being followed in its state by this unit, if there is any, otherwise the empty string.
+	Path        dbus.ObjectPath // The unit object path
+	JobId       uint32          // If there is a job queued for the unit, the numeric job id; 0 otherwise
+	JobType     string          // The job type as string
+	JobPath     dbus.ObjectPath // The job object path
+}
+
+type LinkUnitFileChange EnableUnitFileChange
+
+// LinkUnitFiles() links unit files (that are located outside of the
+// usual unit search paths) into the unit search path.
+//
+// It takes a list of absolute paths to unit files to link and two
+// booleans. The first boolean controls whether the unit shall be
+// enabled for runtime only (true, /run), or persistently (false,
+// /etc).
+// The second controls whether symlinks pointing to other units shall
+// be replaced if necessary.
+//
+// This call returns a list of the changes made. The list consists of
+// structures with three strings: the type of the change (one of symlink
+// or unlink), the file name of the symlink and the destination of the
+// symlink.
+func (c *Conn) LinkUnitFiles(files []string, runtime bool, force bool) ([]LinkUnitFileChange, error) {
+	result := make([][]interface{}, 0)
+	err := c.sysobj.Call("org.freedesktop.systemd1.Manager.LinkUnitFiles", 0, files, runtime, force).Store(&result)
+	if err != nil {
+		return nil, err
+	}
+
+	resultInterface := make([]interface{}, len(result))
+	for i := range result {
+		resultInterface[i] = result[i]
+	}
+
+	changes := make([]LinkUnitFileChange, len(result))
+	changesInterface := make([]interface{}, len(changes))
+	for i := range changes {
+		changesInterface[i] = &changes[i]
+	}
+
+	err = dbus.Store(resultInterface, changesInterface...)
+	if err != nil {
+		return nil, err
+	}
+
+	return changes, nil
+}
+
+// EnableUnitFiles() may be used to enable one or more units in the system (by
+// creating symlinks to them in /etc or /run).
+//
+// It takes a list of unit files to enable (either just file names or full
+// absolute paths if the unit files are residing outside the usual unit
+// search paths), and two booleans: the first controls whether the unit shall
+// be enabled for runtime only (true, /run), or persistently (false, /etc).
+// The second one controls whether symlinks pointing to other units shall
+// be replaced if necessary.
+//
+// This call returns one boolean and an array with the changes made. The
+// boolean signals whether the unit files contained any enablement
+// information (i.e. an [Install] section). The changes list consists of
+// structures with three strings: the type of the change (one of symlink
+// or unlink), the file name of the symlink and the destination of the
+// symlink.
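A sketch of driving EnableUnitFiles and reading back the change list documented above (the unit path is hypothetical; requires privileges to write to /etc):

```
package main

import (
	"fmt"
	"log"

	"github.com/coreos/go-systemd/dbus"
)

func main() {
	conn, err := dbus.New()
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// false = persist under /etc rather than runtime-only /run;
	// true = force replacement of conflicting symlinks.
	hasInstall, changes, err := conn.EnableUnitFiles(
		[]string{"/etc/systemd/system/myapp.service"}, false, true)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("unit had an [Install] section:", hasInstall)
	for _, ch := range changes {
		fmt.Printf("%s: %s -> %s\n", ch.Type, ch.Filename, ch.Destination)
	}
}
```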
+func (c *Conn) EnableUnitFiles(files []string, runtime bool, force bool) (bool, []EnableUnitFileChange, error) { + var carries_install_info bool + + result := make([][]interface{}, 0) + err := c.sysobj.Call("org.freedesktop.systemd1.Manager.EnableUnitFiles", 0, files, runtime, force).Store(&carries_install_info, &result) + if err != nil { + return false, nil, err + } + + resultInterface := make([]interface{}, len(result)) + for i := range result { + resultInterface[i] = result[i] + } + + changes := make([]EnableUnitFileChange, len(result)) + changesInterface := make([]interface{}, len(changes)) + for i := range changes { + changesInterface[i] = &changes[i] + } + + err = dbus.Store(resultInterface, changesInterface...) + if err != nil { + return false, nil, err + } + + return carries_install_info, changes, nil +} + +type EnableUnitFileChange struct { + Type string // Type of the change (one of symlink or unlink) + Filename string // File name of the symlink + Destination string // Destination of the symlink +} + +// DisableUnitFiles() may be used to disable one or more units in the system (by +// removing symlinks to them from /etc or /run). +// +// It takes a list of unit files to disable (either just file names or full +// absolute paths if the unit files are residing outside the usual unit +// search paths), and one boolean: whether the unit was enabled for runtime +// only (true, /run), or persistently (false, /etc). +// +// This call returns an array with the changes made. The changes list +// consists of structures with three strings: the type of the change (one of +// symlink or unlink), the file name of the symlink and the destination of the +// symlink. +func (c *Conn) DisableUnitFiles(files []string, runtime bool) ([]DisableUnitFileChange, error) { + result := make([][]interface{}, 0) + err := c.sysobj.Call("org.freedesktop.systemd1.Manager.DisableUnitFiles", 0, files, runtime).Store(&result) + if err != nil { + return nil, err + } + + resultInterface := make([]interface{}, len(result)) + for i := range result { + resultInterface[i] = result[i] + } + + changes := make([]DisableUnitFileChange, len(result)) + changesInterface := make([]interface{}, len(changes)) + for i := range changes { + changesInterface[i] = &changes[i] + } + + err = dbus.Store(resultInterface, changesInterface...) + if err != nil { + return nil, err + } + + return changes, nil +} + +type DisableUnitFileChange struct { + Type string // Type of the change (one of symlink or unlink) + Filename string // File name of the symlink + Destination string // Destination of the symlink +} + +// Reload instructs systemd to scan for and reload unit files. This is +// equivalent to a 'systemctl daemon-reload'. +func (c *Conn) Reload() error { + return c.sysobj.Call("org.freedesktop.systemd1.Manager.Reload", 0).Store() +} + +func unitPath(name string) dbus.ObjectPath { + return dbus.ObjectPath("/org/freedesktop/systemd1/unit/" + PathBusEscape(name)) +} diff --git a/vendor/github.com/coreos/go-systemd/dbus/properties.go b/vendor/github.com/coreos/go-systemd/dbus/properties.go new file mode 100644 index 00000000..75200115 --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/dbus/properties.go @@ -0,0 +1,218 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dbus
+
+import (
+	"github.com/godbus/dbus"
+)
+
+// From the systemd docs:
+//
+// The properties array of StartTransientUnit() may take many of the settings
+// that may also be configured in unit files. Not all parameters are currently
+// accepted though, but we plan to cover more properties in future releases.
+// Currently you may set the Description, Slice and all dependency types of
+// units, as well as RemainAfterExit, ExecStart for service units,
+// TimeoutStopUSec and PIDs for scope units, and CPUAccounting, CPUShares,
+// BlockIOAccounting, BlockIOWeight, BlockIOReadBandwidth,
+// BlockIOWriteBandwidth, BlockIODeviceWeight, MemoryAccounting, MemoryLimit,
+// DevicePolicy, DeviceAllow for services/scopes/slices. These fields map
+// directly to their counterparts in unit files and as normal D-Bus object
+// properties. The exception here is the PIDs field of scope units which is
+// used for construction of the scope only and specifies the initial PIDs to
+// add to the scope object.
+
+type Property struct {
+	Name  string
+	Value dbus.Variant
+}
+
+type PropertyCollection struct {
+	Name       string
+	Properties []Property
+}
+
+type execStart struct {
+	Path             string   // the binary path to execute
+	Args             []string // an array with all arguments to pass to the executed command, starting with argument 0
+	UncleanIsFailure bool     // a boolean whether it should be considered a failure if the process exits uncleanly
+}
+
+// PropExecStart sets the ExecStart service property. The first argument is a
+// slice with the binary path to execute followed by the arguments to pass to
+// the executed command. See
+// http://www.freedesktop.org/software/systemd/man/systemd.service.html#ExecStart=
+func PropExecStart(command []string, uncleanIsFailure bool) Property {
+	execStarts := []execStart{
+		execStart{
+			Path:             command[0],
+			Args:             command,
+			UncleanIsFailure: uncleanIsFailure,
+		},
+	}
+
+	return Property{
+		Name:  "ExecStart",
+		Value: dbus.MakeVariant(execStarts),
+	}
+}
+
+// PropRemainAfterExit sets the RemainAfterExit service property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.service.html#RemainAfterExit=
+func PropRemainAfterExit(b bool) Property {
+	return Property{
+		Name:  "RemainAfterExit",
+		Value: dbus.MakeVariant(b),
+	}
+}
+
+// PropDescription sets the Description unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Description=
+func PropDescription(desc string) Property {
+	return Property{
+		Name:  "Description",
+		Value: dbus.MakeVariant(desc),
+	}
+}
+
+func propDependency(name string, units []string) Property {
+	return Property{
+		Name:  name,
+		Value: dbus.MakeVariant(units),
+	}
+}
+
+// PropRequires sets the Requires unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Requires=
+func PropRequires(units ...string) Property {
+	return propDependency("Requires", units)
+}
+
+// PropRequiresOverridable sets the RequiresOverridable unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiresOverridable=
+func PropRequiresOverridable(units ...string) Property {
+	return propDependency("RequiresOverridable", units)
+}
+
+// PropRequisite sets the Requisite unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Requisite=
+func PropRequisite(units ...string) Property {
+	return propDependency("Requisite", units)
+}
+
+// PropRequisiteOverridable sets the RequisiteOverridable unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequisiteOverridable=
+func PropRequisiteOverridable(units ...string) Property {
+	return propDependency("RequisiteOverridable", units)
+}
+
+// PropWants sets the Wants unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Wants=
+func PropWants(units ...string) Property {
+	return propDependency("Wants", units)
+}
+
+// PropBindsTo sets the BindsTo unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#BindsTo=
+func PropBindsTo(units ...string) Property {
+	return propDependency("BindsTo", units)
+}
+
+// PropRequiredBy sets the RequiredBy unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiredBy=
+func PropRequiredBy(units ...string) Property {
+	return propDependency("RequiredBy", units)
+}
+
+// PropRequiredByOverridable sets the RequiredByOverridable unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiredByOverridable=
+func PropRequiredByOverridable(units ...string) Property {
+	return propDependency("RequiredByOverridable", units)
+}
+
+// PropWantedBy sets the WantedBy unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#WantedBy=
+func PropWantedBy(units ...string) Property {
+	return propDependency("WantedBy", units)
+}
+
+// PropBoundBy sets the BoundBy unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#BoundBy=
+func PropBoundBy(units ...string) Property {
+	return propDependency("BoundBy", units)
+}
+
+// PropConflicts sets the Conflicts unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Conflicts=
+func PropConflicts(units ...string) Property {
+	return propDependency("Conflicts", units)
+}
+
+// PropConflictedBy sets the ConflictedBy unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#ConflictedBy=
+func PropConflictedBy(units ...string) Property {
+	return propDependency("ConflictedBy", units)
+}
+
+// PropBefore sets the Before unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Before=
+func PropBefore(units ...string) Property {
+	return propDependency("Before", units)
+}
+
+// PropAfter sets the After unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#After=
+func PropAfter(units ...string) Property {
+	return propDependency("After", units)
+}
+
+// PropOnFailure sets the OnFailure unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#OnFailure=
+func PropOnFailure(units ...string) Property {
+	return propDependency("OnFailure", units)
+}
+
+// PropTriggers sets the Triggers unit property.
See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Triggers= +func PropTriggers(units ...string) Property { + return propDependency("Triggers", units) +} + +// PropTriggeredBy sets the TriggeredBy unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#TriggeredBy= +func PropTriggeredBy(units ...string) Property { + return propDependency("TriggeredBy", units) +} + +// PropPropagatesReloadTo sets the PropagatesReloadTo unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#PropagatesReloadTo= +func PropPropagatesReloadTo(units ...string) Property { + return propDependency("PropagatesReloadTo", units) +} + +// PropRequiresMountsFor sets the RequiresMountsFor unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiresMountsFor= +func PropRequiresMountsFor(units ...string) Property { + return propDependency("RequiresMountsFor", units) +} + +// PropSlice sets the Slice unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.resource-control.html#Slice= +func PropSlice(slice string) Property { + return Property{ + Name: "Slice", + Value: dbus.MakeVariant(slice), + } +} diff --git a/vendor/github.com/coreos/go-systemd/dbus/set.go b/vendor/github.com/coreos/go-systemd/dbus/set.go new file mode 100644 index 00000000..f92e6fbe --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/dbus/set.go @@ -0,0 +1,47 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package dbus + +type set struct { + data map[string]bool +} + +func (s *set) Add(value string) { + s.data[value] = true +} + +func (s *set) Remove(value string) { + delete(s.data, value) +} + +func (s *set) Contains(value string) (exists bool) { + _, exists = s.data[value] + return +} + +func (s *set) Length() int { + return len(s.data) +} + +func (s *set) Values() (values []string) { + for val, _ := range s.data { + values = append(values, val) + } + return +} + +func newSet() *set { + return &set{make(map[string]bool)} +} diff --git a/vendor/github.com/coreos/go-systemd/dbus/subscription.go b/vendor/github.com/coreos/go-systemd/dbus/subscription.go new file mode 100644 index 00000000..99645144 --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/dbus/subscription.go @@ -0,0 +1,250 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
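The Prop* helpers defined in properties.go above are intended to feed StartTransientUnit from methods.go, roughly like this sketch (unit name and command are illustrative; assumes a systemd recent enough to accept ExecStart for transient services):

```
package main

import (
	"fmt"
	"log"

	"github.com/coreos/go-systemd/dbus"
)

func main() {
	conn, err := dbus.New()
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	props := []dbus.Property{
		dbus.PropDescription("transient sleep, for illustration"),
		dbus.PropExecStart([]string{"/bin/sleep", "30"}, false),
	}
	ch := make(chan string, 1)
	if _, err := conn.StartTransientUnit("example-sleep.service", "replace", props, ch); err != nil {
		log.Fatal(err)
	}
	fmt.Println("transient unit job:", <-ch)
}
```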
+ +package dbus + +import ( + "errors" + "time" + + "github.com/godbus/dbus" +) + +const ( + cleanIgnoreInterval = int64(10 * time.Second) + ignoreInterval = int64(30 * time.Millisecond) +) + +// Subscribe sets up this connection to subscribe to all systemd dbus events. +// This is required before calling SubscribeUnits. When the connection closes +// systemd will automatically stop sending signals so there is no need to +// explicitly call Unsubscribe(). +func (c *Conn) Subscribe() error { + c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0, + "type='signal',interface='org.freedesktop.systemd1.Manager',member='UnitNew'") + c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0, + "type='signal',interface='org.freedesktop.DBus.Properties',member='PropertiesChanged'") + + err := c.sigobj.Call("org.freedesktop.systemd1.Manager.Subscribe", 0).Store() + if err != nil { + return err + } + + return nil +} + +// Unsubscribe this connection from systemd dbus events. +func (c *Conn) Unsubscribe() error { + err := c.sigobj.Call("org.freedesktop.systemd1.Manager.Unsubscribe", 0).Store() + if err != nil { + return err + } + + return nil +} + +func (c *Conn) dispatch() { + ch := make(chan *dbus.Signal, signalBuffer) + + c.sigconn.Signal(ch) + + go func() { + for { + signal, ok := <-ch + if !ok { + return + } + + if signal.Name == "org.freedesktop.systemd1.Manager.JobRemoved" { + c.jobComplete(signal) + } + + if c.subscriber.updateCh == nil { + continue + } + + var unitPath dbus.ObjectPath + switch signal.Name { + case "org.freedesktop.systemd1.Manager.JobRemoved": + unitName := signal.Body[2].(string) + c.sysobj.Call("org.freedesktop.systemd1.Manager.GetUnit", 0, unitName).Store(&unitPath) + case "org.freedesktop.systemd1.Manager.UnitNew": + unitPath = signal.Body[1].(dbus.ObjectPath) + case "org.freedesktop.DBus.Properties.PropertiesChanged": + if signal.Body[0].(string) == "org.freedesktop.systemd1.Unit" { + unitPath = signal.Path + } + } + + if unitPath == dbus.ObjectPath("") { + continue + } + + c.sendSubStateUpdate(unitPath) + } + }() +} + +// Returns two unbuffered channels which will receive all changed units every +// interval. Deleted units are sent as nil. +func (c *Conn) SubscribeUnits(interval time.Duration) (<-chan map[string]*UnitStatus, <-chan error) { + return c.SubscribeUnitsCustom(interval, 0, func(u1, u2 *UnitStatus) bool { return *u1 != *u2 }, nil) +} + +// SubscribeUnitsCustom is like SubscribeUnits but lets you specify the buffer +// size of the channels, the comparison function for detecting changes and a filter +// function for cutting down on the noise that your channel receives. 
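A polling consumer for SubscribeUnits as documented above (the interval is arbitrary; assumes a systemd host):

```
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/coreos/go-systemd/dbus"
)

func main() {
	conn, err := dbus.New()
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	if err := conn.Subscribe(); err != nil { // required before SubscribeUnits
		log.Fatal(err)
	}

	// Changed units arrive as a map each interval; deleted units are nil.
	statusCh, errCh := conn.SubscribeUnits(5 * time.Second)
	for {
		select {
		case changed := <-statusCh:
			for name, status := range changed {
				if status == nil {
					fmt.Println(name, "removed")
					continue
				}
				fmt.Println(name, status.ActiveState, status.SubState)
			}
		case err := <-errCh:
			log.Println("subscribe error:", err)
		}
	}
}
```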
+func (c *Conn) SubscribeUnitsCustom(interval time.Duration, buffer int, isChanged func(*UnitStatus, *UnitStatus) bool, filterUnit func(string) bool) (<-chan map[string]*UnitStatus, <-chan error) {
+	old := make(map[string]*UnitStatus)
+	statusChan := make(chan map[string]*UnitStatus, buffer)
+	errChan := make(chan error, buffer)
+
+	go func() {
+		for {
+			timerChan := time.After(interval)
+
+			units, err := c.ListUnits()
+			if err == nil {
+				cur := make(map[string]*UnitStatus)
+				for i := range units {
+					if filterUnit != nil && filterUnit(units[i].Name) {
+						continue
+					}
+					cur[units[i].Name] = &units[i]
+				}
+
+				// add all new or changed units
+				changed := make(map[string]*UnitStatus)
+				for n, u := range cur {
+					if oldU, ok := old[n]; !ok || isChanged(oldU, u) {
+						changed[n] = u
+					}
+					delete(old, n)
+				}
+
+				// add all deleted units
+				for oldN := range old {
+					changed[oldN] = nil
+				}
+
+				old = cur
+
+				if len(changed) != 0 {
+					statusChan <- changed
+				}
+			} else {
+				errChan <- err
+			}
+
+			<-timerChan
+		}
+	}()
+
+	return statusChan, errChan
+}
+
+type SubStateUpdate struct {
+	UnitName string
+	SubState string
+}
+
+// SetSubStateSubscriber writes to updateCh when any unit's substate changes.
+// Although this writes to updateCh on every state change, the reported state
+// may be more recent than the change that generated it (due to an unavoidable
+// race in the systemd dbus interface). That is, this method provides a good
+// way to keep a current view of all units' states, but is not guaranteed to
+// show every state transition they go through. Furthermore, state changes
+// will only be written to the channel with non-blocking writes. If updateCh
+// is full, it attempts to write an error to errCh; if errCh is full, the error
+// passes silently.
+func (c *Conn) SetSubStateSubscriber(updateCh chan<- *SubStateUpdate, errCh chan<- error) {
+	c.subscriber.Lock()
+	defer c.subscriber.Unlock()
+	c.subscriber.updateCh = updateCh
+	c.subscriber.errCh = errCh
+}
+
+func (c *Conn) sendSubStateUpdate(path dbus.ObjectPath) {
+	c.subscriber.Lock()
+	defer c.subscriber.Unlock()
+
+	if c.shouldIgnore(path) {
+		return
+	}
+
+	info, err := c.GetUnitProperties(string(path))
+	if err != nil {
+		select {
+		case c.subscriber.errCh <- err:
+		default:
+		}
+		// info is nil when the properties fetch fails; return here rather
+		// than panicking on the type assertions below.
+		return
+	}
+
+	name := info["Id"].(string)
+	substate := info["SubState"].(string)
+
+	update := &SubStateUpdate{name, substate}
+	select {
+	case c.subscriber.updateCh <- update:
+	default:
+		select {
+		case c.subscriber.errCh <- errors.New("update channel full!"):
+		default:
+		}
+	}
+
+	c.updateIgnore(path, info)
+}
+
+// The ignore functions work around a wart in the systemd dbus interface.
+// Requesting the properties of an unloaded unit will cause systemd to send a
+// pair of UnitNew/UnitRemoved signals. Because we need to get a unit's
+// properties on UnitNew (as that's the only indication of a new unit coming up
+// for the first time), we would enter an infinite loop if we did not attempt
+// to detect and ignore these spurious signals. The signals themselves are
+// indistinguishable from relevant ones, so we (somewhat hackishly) ignore an
+// unloaded unit's signals for a short time after requesting its properties.
+// This means that we will miss e.g. a transient unit being restarted
+// *immediately* upon failure and also a transient unit being started
+// immediately after requesting its status (with systemctl status, for example,
+// because this causes a UnitNew signal to be sent which then causes us to fetch
+// the properties).
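And a consumer-side sketch for SetSubStateSubscriber above (buffer sizes are arbitrary; since the writes are non-blocking, generous buffers reduce dropped updates):

```
package main

import (
	"fmt"
	"log"

	"github.com/coreos/go-systemd/dbus"
)

func main() {
	conn, err := dbus.New()
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	updates := make(chan *dbus.SubStateUpdate, 256)
	errs := make(chan error, 16)
	conn.SetSubStateSubscriber(updates, errs)

	if err := conn.Subscribe(); err != nil {
		log.Fatal(err)
	}

	for {
		select {
		case u := <-updates:
			fmt.Println(u.UnitName, "->", u.SubState)
		case err := <-errs:
			log.Println("subscription error:", err)
		}
	}
}
```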
+ +func (c *Conn) shouldIgnore(path dbus.ObjectPath) bool { + t, ok := c.subscriber.ignore[path] + return ok && t >= time.Now().UnixNano() +} + +func (c *Conn) updateIgnore(path dbus.ObjectPath, info map[string]interface{}) { + c.cleanIgnore() + + // unit is unloaded - it will trigger bad systemd dbus behavior + if info["LoadState"].(string) == "not-found" { + c.subscriber.ignore[path] = time.Now().UnixNano() + ignoreInterval + } +} + +// without this, ignore would grow unboundedly over time +func (c *Conn) cleanIgnore() { + now := time.Now().UnixNano() + if c.subscriber.cleanIgnore < now { + c.subscriber.cleanIgnore = now + cleanIgnoreInterval + + for p, t := range c.subscriber.ignore { + if t < now { + delete(c.subscriber.ignore, p) + } + } + } +} diff --git a/vendor/github.com/coreos/go-systemd/dbus/subscription_set.go b/vendor/github.com/coreos/go-systemd/dbus/subscription_set.go new file mode 100644 index 00000000..5b408d58 --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/dbus/subscription_set.go @@ -0,0 +1,57 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package dbus + +import ( + "time" +) + +// SubscriptionSet returns a subscription set which is like conn.Subscribe but +// can filter to only return events for a set of units. +type SubscriptionSet struct { + *set + conn *Conn +} + +func (s *SubscriptionSet) filter(unit string) bool { + return !s.Contains(unit) +} + +// Subscribe starts listening for dbus events for all of the units in the set. +// Returns channels identical to conn.SubscribeUnits. +func (s *SubscriptionSet) Subscribe() (<-chan map[string]*UnitStatus, <-chan error) { + // TODO: Make fully evented by using systemd 209 with properties changed values + return s.conn.SubscribeUnitsCustom(time.Second, 0, + mismatchUnitStatus, + func(unit string) bool { return s.filter(unit) }, + ) +} + +// NewSubscriptionSet returns a new subscription set. +func (conn *Conn) NewSubscriptionSet() *SubscriptionSet { + return &SubscriptionSet{newSet(), conn} +} + +// mismatchUnitStatus returns true if the provided UnitStatus objects +// are not equivalent. false is returned if the objects are equivalent. +// Only the Name, Description and state-related fields are used in +// the comparison. 
+func mismatchUnitStatus(u1, u2 *UnitStatus) bool { + return u1.Name != u2.Name || + u1.Description != u2.Description || + u1.LoadState != u2.LoadState || + u1.ActiveState != u2.ActiveState || + u1.SubState != u2.SubState +} diff --git a/vendor/github.com/coreos/go-systemd/test b/vendor/github.com/coreos/go-systemd/test new file mode 100755 index 00000000..bc1b9859 --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/test @@ -0,0 +1,76 @@ +#!/bin/bash -e +# +# Run all tests +# ./test +# ./test -v +# +# Run tests for one package +# PKG=./foo ./test +# PKG=bar ./test +# + +# Invoke ./cover for HTML output +COVER=${COVER:-"-cover"} + +PROJ="go-systemd" +ORG_PATH="github.com/coreos" +REPO_PATH="${ORG_PATH}/${PROJ}" + +# As a convenience, set up a self-contained GOPATH if none set +if [ -z "$GOPATH" ]; then + if [ ! -h gopath/src/${REPO_PATH} ]; then + mkdir -p gopath/src/${ORG_PATH} + ln -s ../../../.. gopath/src/${REPO_PATH} || exit 255 + fi + export GOPATH=${PWD}/gopath + go get -u github.com/godbus/dbus +fi + +TESTABLE="activation journal login1 machine1 unit" +FORMATTABLE="$TESTABLE sdjournal dbus" +if [ -e "/run/systemd/system/" ]; then + TESTABLE="${TESTABLE} sdjournal" + if [ "$EUID" == "0" ]; then + # testing actual systemd behaviour requires root + TESTABLE="${TESTABLE} dbus" + fi +fi + + +# user has not provided PKG override +if [ -z "$PKG" ]; then + TEST=$TESTABLE + FMT=$FORMATTABLE + +# user has provided PKG override +else + # strip out slashes and dots from PKG=./foo/ + TEST=${PKG//\//} + TEST=${TEST//./} + + # only run gofmt on packages provided by user + FMT="$TEST" +fi + +# split TEST into an array and prepend REPO_PATH to each local package +split=(${TEST// / }) +TEST=${split[@]/#/${REPO_PATH}/} + +echo "Running tests..." +go test ${COVER} $@ ${TEST} + +echo "Checking gofmt..." +fmtRes=$(gofmt -l $FMT) +if [ -n "${fmtRes}" ]; then + echo -e "gofmt checking failed:\n${fmtRes}" + exit 255 +fi + +echo "Checking govet..." +vetRes=$(go vet $TEST) +if [ -n "${vetRes}" ]; then + echo -e "govet checking failed:\n${vetRes}" + exit 255 +fi + +echo "Success" diff --git a/vendor/github.com/coreos/go-systemd/util/util.go b/vendor/github.com/coreos/go-systemd/util/util.go new file mode 100644 index 00000000..33832a1e --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/util/util.go @@ -0,0 +1,33 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package util contains utility functions related to systemd that applications +// can use to check things like whether systemd is running. +package util + +import ( + "os" +) + +// IsRunningSystemd checks whether the host was booted with systemd as its init +// system. This functions similar to systemd's `sd_booted(3)`: internally, it +// checks whether /run/systemd/system/ exists and is a directory. 
+// http://www.freedesktop.org/software/systemd/man/sd_booted.html +func IsRunningSystemd() bool { + fi, err := os.Lstat("/run/systemd/system") + if err != nil { + return false + } + return fi.IsDir() +} diff --git a/vendor/github.com/docker/containerd/Makefile b/vendor/github.com/docker/containerd/Makefile index 81267b4e..fdd38fcd 100644 --- a/vendor/github.com/docker/containerd/Makefile +++ b/vendor/github.com/docker/containerd/Makefile @@ -18,8 +18,8 @@ ifeq ($(INTERACTIVE), 1) DOCKER_FLAGS += -t endif -TEST_ARTIFACTS_DIR := integration-test/test-artifacts -BUNDLE_ARCHIVES_DIR := $(TEST_ARTIFACTS_DIR)/archives +TESTBENCH_ARTIFACTS_DIR := output/test-artifacts +TESTBENCH_BUNDLE_DIR := $(TESTBENCH_ARTIFACTS_DIR)/archives DOCKER_IMAGE := containerd-dev$(if $(GIT_BRANCH),:$(GIT_BRANCH)) DOCKER_RUN := docker run --privileged --rm -i $(DOCKER_FLAGS) "$(DOCKER_IMAGE)" @@ -35,7 +35,7 @@ bin: mkdir -p bin/ clean: - rm -rf bin + rm -rf bin && rm -rf output client: bin cd ctr && go build -ldflags "${LDFLAGS}" -o ../bin/ctr @@ -55,18 +55,21 @@ shim: bin shim-static: cd containerd-shim && go build -ldflags "-w -extldflags -static ${LDFLAGS}" -tags "$(BUILDTAGS)" -o ../bin/containerd-shim -$(BUNDLE_ARCHIVES_DIR)/busybox.tar: - @mkdir -p $(BUNDLE_ARCHIVES_DIR) - curl -sSL 'https://github.com/jpetazzo/docker-busybox/raw/buildroot-2014.11/rootfs.tar' -o $(BUNDLE_ARCHIVES_DIR)/busybox.tar +$(TESTBENCH_BUNDLE_DIR)/busybox.tar: + mkdir -p $(TESTBENCH_BUNDLE_DIR) + curl -sSL 'https://github.com/jpetazzo/docker-busybox/raw/buildroot-2014.11/rootfs.tar' -o $(TESTBENCH_BUNDLE_DIR)/busybox.tar -bundles-rootfs: $(BUNDLE_ARCHIVES_DIR)/busybox.tar +bundles-rootfs: $(TESTBENCH_BUNDLE_DIR)/busybox.tar -dbuild: $(BUNDLE_ARCHIVES_DIR)/busybox.tar +dbuild: $(TESTBENCH_BUNDLE_DIR)/busybox.tar @docker build --rm --force-rm -t "$(DOCKER_IMAGE)" . dtest: dbuild $(DOCKER_RUN) make test +dbench: dbuild + $(DOCKER_RUN) make bench + install: cp bin/* /usr/local/bin/ @@ -82,15 +85,17 @@ lint: shell: dbuild $(DOCKER_RUN) bash -test: validate - go test -v $(shell go list ./... | grep -v /vendor | grep -v /integration-test) +test: validate install bundles-rootfs + go test -bench=. -v $(shell go list ./... | grep -v /vendor | grep -v /integration-test) ifneq ($(wildcard /.dockerenv), ) - $(MAKE) install bundles-rootfs cd integration-test ; \ go test -check.v -check.timeout=$(TEST_TIMEOUT) -timeout=$(TEST_SUITE_TIMEOUT) $(TESTFLAGS) github.com/docker/containerd/integration-test && \ go test -containerd.shim="" -check.v -check.timeout=$(TEST_TIMEOUT) -timeout=$(TEST_SUITE_TIMEOUT) $(TESTFLAGS) github.com/docker/containerd/integration-test endif +bench: shim validate install bundles-rootfs + go test -bench=. -v $(shell go list ./... 
| grep -v /vendor | grep -v /integration-test) + validate: fmt uninstall:
diff --git a/vendor/github.com/docker/containerd/api/grpc/server/server.go b/vendor/github.com/docker/containerd/api/grpc/server/server.go
new file mode 100644
index 00000000..55eb7137
--- /dev/null
+++ b/vendor/github.com/docker/containerd/api/grpc/server/server.go
@@ -0,0 +1,468 @@
+package server
+
+import (
+	"bufio"
+	"errors"
+	"fmt"
+	"os"
+	"strconv"
+	"strings"
+	"syscall"
+	"time"
+
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+
+	"github.com/docker/containerd"
+	"github.com/docker/containerd/api/grpc/types"
+	"github.com/docker/containerd/runtime"
+	"github.com/docker/containerd/supervisor"
+	"golang.org/x/net/context"
+)
+
+type apiServer struct {
+	sv *supervisor.Supervisor
+}
+
+// NewServer returns a gRPC server instance
+func NewServer(sv *supervisor.Supervisor) types.APIServer {
+	return &apiServer{
+		sv: sv,
+	}
+}
+
+func (s *apiServer) GetServerVersion(ctx context.Context, c *types.GetServerVersionRequest) (*types.GetServerVersionResponse, error) {
+	return &types.GetServerVersionResponse{
+		Major:    containerd.VersionMajor,
+		Minor:    containerd.VersionMinor,
+		Patch:    containerd.VersionPatch,
+		Revision: containerd.GitCommit,
+	}, nil
+}
+
+func (s *apiServer) CreateContainer(ctx context.Context, c *types.CreateContainerRequest) (*types.CreateContainerResponse, error) {
+	if c.BundlePath == "" {
+		return nil, errors.New("empty bundle path")
+	}
+	e := &supervisor.StartTask{}
+	e.ID = c.Id
+	e.BundlePath = c.BundlePath
+	e.Stdin = c.Stdin
+	e.Stdout = c.Stdout
+	e.Stderr = c.Stderr
+	e.Labels = c.Labels
+	e.NoPivotRoot = c.NoPivotRoot
+	e.Runtime = c.Runtime
+	e.RuntimeArgs = c.RuntimeArgs
+	e.StartResponse = make(chan supervisor.StartResponse, 1)
+	if c.Checkpoint != "" {
+		e.CheckpointDir = c.CheckpointDir
+		e.Checkpoint = &runtime.Checkpoint{
+			Name: c.Checkpoint,
+		}
+	}
+	s.sv.SendTask(e)
+	if err := <-e.ErrorCh(); err != nil {
+		return nil, err
+	}
+	r := <-e.StartResponse
+	apiC, err := createAPIContainer(r.Container, false)
+	if err != nil {
+		return nil, err
+	}
+	return &types.CreateContainerResponse{
+		Container: apiC,
+	}, nil
+}
+
+func (s *apiServer) CreateCheckpoint(ctx context.Context, r *types.CreateCheckpointRequest) (*types.CreateCheckpointResponse, error) {
+	e := &supervisor.CreateCheckpointTask{}
+	e.ID = r.Id
+	e.CheckpointDir = r.CheckpointDir
+	e.Checkpoint = &runtime.Checkpoint{
+		Name:        r.Checkpoint.Name,
+		Exit:        r.Checkpoint.Exit,
+		Tcp:         r.Checkpoint.Tcp,
+		UnixSockets: r.Checkpoint.UnixSockets,
+		Shell:       r.Checkpoint.Shell,
+	}
+
+	s.sv.SendTask(e)
+	if err := <-e.ErrorCh(); err != nil {
+		return nil, err
+	}
+	return &types.CreateCheckpointResponse{}, nil
+}
+
+func (s *apiServer) DeleteCheckpoint(ctx context.Context, r *types.DeleteCheckpointRequest) (*types.DeleteCheckpointResponse, error) {
+	if r.Name == "" {
+		return nil, errors.New("checkpoint name cannot be empty")
+	}
+	e := &supervisor.DeleteCheckpointTask{}
+	e.ID = r.Id
+	e.CheckpointDir = r.CheckpointDir
+	e.Checkpoint = &runtime.Checkpoint{
+		Name: r.Name,
+	}
+	s.sv.SendTask(e)
+	if err := <-e.ErrorCh(); err != nil {
+		return nil, err
+	}
+	return &types.DeleteCheckpointResponse{}, nil
+}
+
+func (s *apiServer) ListCheckpoint(ctx context.Context, r *types.ListCheckpointRequest) (*types.ListCheckpointResponse, error) {
+	e := &supervisor.GetContainersTask{}
+	s.sv.SendTask(e)
+	if err := <-e.ErrorCh(); err != nil {
+		return nil, err
+	}
+	var container runtime.Container
+	for _, c := range e.Containers {
+		if c.ID() == r.Id {
+			container = c
+			break
+		}
+	}
+	if container == nil {
+		return nil, grpc.Errorf(codes.NotFound, "no such container")
+	}
+	var out []*types.Checkpoint
+	checkpoints, err := container.Checkpoints(r.CheckpointDir)
+	if err != nil {
+		return nil, err
+	}
+	for _, c := range checkpoints {
+		out = append(out, &types.Checkpoint{
+			Name:        c.Name,
+			Tcp:         c.Tcp,
+			Shell:       c.Shell,
+			UnixSockets: c.UnixSockets,
+			// TODO: figure out timestamp
+			//Timestamp: c.Timestamp,
+		})
+	}
+	return &types.ListCheckpointResponse{Checkpoints: out}, nil
+}
+
+func (s *apiServer) Signal(ctx context.Context, r *types.SignalRequest) (*types.SignalResponse, error) {
+	e := &supervisor.SignalTask{}
+	e.ID = r.Id
+	e.PID = r.Pid
+	e.Signal = syscall.Signal(int(r.Signal))
+	s.sv.SendTask(e)
+	if err := <-e.ErrorCh(); err != nil {
+		return nil, err
+	}
+	return &types.SignalResponse{}, nil
+}
+
+func (s *apiServer) State(ctx context.Context, r *types.StateRequest) (*types.StateResponse, error) {
+	e := &supervisor.GetContainersTask{}
+	e.ID = r.Id
+	s.sv.SendTask(e)
+	if err := <-e.ErrorCh(); err != nil {
+		return nil, err
+	}
+	m := s.sv.Machine()
+	state := &types.StateResponse{
+		Machine: &types.Machine{
+			Cpus:   uint32(m.Cpus),
+			Memory: uint64(m.Memory),
+		},
+	}
+	for _, c := range e.Containers {
+		apiC, err := createAPIContainer(c, true)
+		if err != nil {
+			return nil, err
+		}
+		state.Containers = append(state.Containers, apiC)
+	}
+	return state, nil
+}
+
+func createAPIContainer(c runtime.Container, getPids bool) (*types.Container, error) {
+	processes, err := c.Processes()
+	if err != nil {
+		return nil, grpc.Errorf(codes.Internal, "get processes for container: "+err.Error())
+	}
+	var procs []*types.Process
+	for _, p := range processes {
+		oldProc := p.Spec()
+		stdio := p.Stdio()
+		proc := &types.Process{
+			Pid:       p.ID(),
+			SystemPid: uint32(p.SystemPid()),
+			Terminal:  oldProc.Terminal,
+			Args:      oldProc.Args,
+			Env:       oldProc.Env,
+			Cwd:       oldProc.Cwd,
+			Stdin:     stdio.Stdin,
+			Stdout:    stdio.Stdout,
+			Stderr:    stdio.Stderr,
+		}
+		proc.User = &types.User{
+			Uid:            oldProc.User.UID,
+			Gid:            oldProc.User.GID,
+			AdditionalGids: oldProc.User.AdditionalGids,
+		}
+		proc.Capabilities = oldProc.Capabilities
+		proc.ApparmorProfile = oldProc.ApparmorProfile
+		proc.SelinuxLabel = oldProc.SelinuxLabel
+		proc.NoNewPrivileges = oldProc.NoNewPrivileges
+		for _, rl := range oldProc.Rlimits {
+			proc.Rlimits = append(proc.Rlimits, &types.Rlimit{
+				Type: rl.Type,
+				Soft: rl.Soft,
+				Hard: rl.Hard,
+			})
+		}
+		procs = append(procs, proc)
+	}
+	var pids []int
+	state := c.State()
+	if getPids && (state == runtime.Running || state == runtime.Paused) {
+		if pids, err = c.Pids(); err != nil {
+			return nil, grpc.Errorf(codes.Internal, "get all pids for container: "+err.Error())
+		}
+	}
+	return &types.Container{
+		Id:         c.ID(),
+		BundlePath: c.Path(),
+		Processes:  procs,
+		Labels:     c.Labels(),
+		Status:     string(state),
+		Pids:       toUint32(pids),
+		Runtime:    c.Runtime(),
+	}, nil
+}
+
+func toUint32(its []int) []uint32 {
+	o := []uint32{}
+	for _, i := range its {
+		o = append(o, uint32(i))
+	}
+	return o
+}
+
+func (s *apiServer) UpdateContainer(ctx context.Context, r *types.UpdateContainerRequest) (*types.UpdateContainerResponse, error) {
+	e := &supervisor.UpdateTask{}
+	e.ID = r.Id
+	e.State = runtime.State(r.Status)
+	if r.Resources != nil {
+		rs := r.Resources
+		e.Resources = &runtime.Resource{}
+		if rs.CpuShares != 0 {
+			e.Resources.CPUShares = int64(rs.CpuShares)
+		}
+		if rs.BlkioWeight != 0 {
+			e.Resources.BlkioWeight = uint16(rs.BlkioWeight)
+		}
+		if rs.CpuPeriod != 0 {
+			e.Resources.CPUPeriod = int64(rs.CpuPeriod)
+		}
+		if rs.CpuQuota != 0 {
+			e.Resources.CPUQuota = int64(rs.CpuQuota)
+		}
+		if rs.CpusetCpus != "" {
+			e.Resources.CpusetCpus = rs.CpusetCpus
+		}
+		if rs.CpusetMems != "" {
+			e.Resources.CpusetMems = rs.CpusetMems
+		}
+		if rs.KernelMemoryLimit != 0 {
+			e.Resources.KernelMemory = int64(rs.KernelMemoryLimit)
+		}
+		if rs.MemoryLimit != 0 {
+			e.Resources.Memory = int64(rs.MemoryLimit)
+		}
+		if rs.MemoryReservation != 0 {
+			e.Resources.MemoryReservation = int64(rs.MemoryReservation)
+		}
+		if rs.MemorySwap != 0 {
+			e.Resources.MemorySwap = int64(rs.MemorySwap)
+		}
+	}
+	s.sv.SendTask(e)
+	if err := <-e.ErrorCh(); err != nil {
+		return nil, err
+	}
+	return &types.UpdateContainerResponse{}, nil
+}
+
+func (s *apiServer) UpdateProcess(ctx context.Context, r *types.UpdateProcessRequest) (*types.UpdateProcessResponse, error) {
+	e := &supervisor.UpdateProcessTask{}
+	e.ID = r.Id
+	e.PID = r.Pid
+	e.Height = int(r.Height)
+	e.Width = int(r.Width)
+	e.CloseStdin = r.CloseStdin
+	s.sv.SendTask(e)
+	if err := <-e.ErrorCh(); err != nil {
+		return nil, err
+	}
+	return &types.UpdateProcessResponse{}, nil
+}
+
+func (s *apiServer) Events(r *types.EventsRequest, stream types.API_EventsServer) error {
+	t := time.Time{}
+	if r.Timestamp != 0 {
+		t = time.Unix(int64(r.Timestamp), 0)
+	}
+	events := s.sv.Events(t)
+	defer s.sv.Unsubscribe(events)
+	for e := range events {
+		if err := stream.Send(&types.Event{
+			Id:        e.ID,
+			Type:      e.Type,
+			Timestamp: uint64(e.Timestamp.Unix()),
+			Pid:       e.PID,
+			Status:    uint32(e.Status),
+		}); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func convertToPb(st *runtime.Stat) *types.StatsResponse {
+	pbSt := &types.StatsResponse{
+		Timestamp:   uint64(st.Timestamp.Unix()),
+		CgroupStats: &types.CgroupStats{},
+	}
+	systemUsage, _ := getSystemCPUUsage()
+	pbSt.CgroupStats.CpuStats = &types.CpuStats{
+		CpuUsage: &types.CpuUsage{
+			TotalUsage:        st.Cpu.Usage.Total,
+			PercpuUsage:       st.Cpu.Usage.Percpu,
+			UsageInKernelmode: st.Cpu.Usage.Kernel,
+			UsageInUsermode:   st.Cpu.Usage.User,
+		},
+		ThrottlingData: &types.ThrottlingData{
+			Periods:          st.Cpu.Throttling.Periods,
+			ThrottledPeriods: st.Cpu.Throttling.ThrottledPeriods,
+			ThrottledTime:    st.Cpu.Throttling.ThrottledTime,
+		},
+		SystemUsage: systemUsage,
+	}
+	pbSt.CgroupStats.MemoryStats = &types.MemoryStats{
+		Cache: st.Memory.Cache,
+		Usage: &types.MemoryData{
+			Usage:    st.Memory.Usage.Usage,
+			MaxUsage: st.Memory.Usage.Max,
+			Failcnt:  st.Memory.Usage.Failcnt,
+			Limit:    st.Memory.Usage.Limit,
+		},
+		SwapUsage: &types.MemoryData{
+			Usage:    st.Memory.Swap.Usage,
+			MaxUsage: st.Memory.Swap.Max,
+			Failcnt:  st.Memory.Swap.Failcnt,
+			Limit:    st.Memory.Swap.Limit,
+		},
+		KernelUsage: &types.MemoryData{
+			Usage:    st.Memory.Kernel.Usage,
+			MaxUsage: st.Memory.Kernel.Max,
+			Failcnt:  st.Memory.Kernel.Failcnt,
+			Limit:    st.Memory.Kernel.Limit,
+		},
+		Stats: st.Memory.Raw,
+	}
+	pbSt.CgroupStats.BlkioStats = &types.BlkioStats{
+		IoServiceBytesRecursive: convertBlkioEntryToPb(st.Blkio.IoServiceBytesRecursive),
+		IoServicedRecursive:     convertBlkioEntryToPb(st.Blkio.IoServicedRecursive),
+		IoQueuedRecursive:       convertBlkioEntryToPb(st.Blkio.IoQueuedRecursive),
+		IoServiceTimeRecursive:  convertBlkioEntryToPb(st.Blkio.IoServiceTimeRecursive),
+		IoWaitTimeRecursive:     convertBlkioEntryToPb(st.Blkio.IoWaitTimeRecursive),
+		IoMergedRecursive:       convertBlkioEntryToPb(st.Blkio.IoMergedRecursive),
+		IoTimeRecursive:         convertBlkioEntryToPb(st.Blkio.IoTimeRecursive),
+		SectorsRecursive:        convertBlkioEntryToPb(st.Blkio.SectorsRecursive),
+	}
+	pbSt.CgroupStats.HugetlbStats = make(map[string]*types.HugetlbStats)
+	for k, st := range st.Hugetlb {
+		pbSt.CgroupStats.HugetlbStats[k] = &types.HugetlbStats{
+			Usage:    st.Usage,
+			MaxUsage: st.Max,
+			Failcnt:  st.Failcnt,
+		}
+	}
+	pbSt.CgroupStats.PidsStats = &types.PidsStats{
+		Current: st.Pids.Current,
+		Limit:   st.Pids.Limit,
+	}
+	return pbSt
+}
+
+func convertBlkioEntryToPb(b []runtime.BlkioEntry) []*types.BlkioStatsEntry {
+	var pbEs []*types.BlkioStatsEntry
+	for _, e := range b {
+		pbEs = append(pbEs, &types.BlkioStatsEntry{
+			Major: e.Major,
+			Minor: e.Minor,
+			Op:    e.Op,
+			Value: e.Value,
+		})
+	}
+	return pbEs
+}
+
+const nanoSecondsPerSecond = 1e9
+
+// getSystemCPUUsage returns the host system's cpu usage in
+// nanoseconds. An error is returned if the format of the underlying
+// file does not match the expected layout.
+//
+// Reads /proc/stat as exposed by the Linux kernel (the file is not
+// part of POSIX). Looks for the cpu statistics line and then sums up
+// the first seven fields provided. See `man 5 proc` for details on
+// specific field information.
+func getSystemCPUUsage() (uint64, error) {
+	var line string
+	f, err := os.Open("/proc/stat")
+	if err != nil {
+		return 0, err
+	}
+	bufReader := bufio.NewReaderSize(nil, 128)
+	defer func() {
+		bufReader.Reset(nil)
+		f.Close()
+	}()
+	bufReader.Reset(f)
+	err = nil
+	for err == nil {
+		line, err = bufReader.ReadString('\n')
+		if err != nil {
+			break
+		}
+		parts := strings.Fields(line)
+		switch parts[0] {
+		case "cpu":
+			if len(parts) < 8 {
+				return 0, fmt.Errorf("bad format of cpu stats")
+			}
+			var totalClockTicks uint64
+			for _, i := range parts[1:8] {
+				v, err := strconv.ParseUint(i, 10, 64)
+				if err != nil {
+					return 0, fmt.Errorf("error parsing cpu stats")
+				}
+				totalClockTicks += v
+			}
+			return (totalClockTicks * nanoSecondsPerSecond) /
+				clockTicksPerSecond, nil
+		}
+	}
+	return 0, fmt.Errorf("bad stats format")
+}
+
+func (s *apiServer) Stats(ctx context.Context, r *types.StatsRequest) (*types.StatsResponse, error) {
+	e := &supervisor.StatsTask{}
+	e.ID = r.Id
+	e.Stat = make(chan *runtime.Stat, 1)
+	s.sv.SendTask(e)
+	if err := <-e.ErrorCh(); err != nil {
+		return nil, err
+	}
+	stats := <-e.Stat
+	t := convertToPb(stats)
+	return t, nil
+}
diff --git a/vendor/github.com/docker/containerd/api/grpc/server/server_linux.go b/vendor/github.com/docker/containerd/api/grpc/server/server_linux.go
new file mode 100644
index 00000000..1051f1f0
--- /dev/null
+++ b/vendor/github.com/docker/containerd/api/grpc/server/server_linux.go
@@ -0,0 +1,59 @@
+package server
+
+import (
+	"fmt"
+
+	"github.com/docker/containerd/api/grpc/types"
+	"github.com/docker/containerd/specs"
+	"github.com/docker/containerd/supervisor"
+	"github.com/opencontainers/runc/libcontainer/system"
+	ocs "github.com/opencontainers/runtime-spec/specs-go"
+	"golang.org/x/net/context"
+)
+
+var clockTicksPerSecond = uint64(system.GetClockTicks())
+
+func (s *apiServer) AddProcess(ctx context.Context, r *types.AddProcessRequest) (*types.AddProcessResponse, error) {
+	process := &specs.ProcessSpec{
+		Terminal: r.Terminal,
+		Args:     r.Args,
+		Env:      r.Env,
+		Cwd:      r.Cwd,
+	}
+	process.User = ocs.User{
+		UID:            r.User.Uid,
+		GID:            r.User.Gid,
+		AdditionalGids: r.User.AdditionalGids,
+	}
+	process.Capabilities = r.Capabilities
+	process.ApparmorProfile = r.ApparmorProfile
+	process.SelinuxLabel = r.SelinuxLabel
+	process.NoNewPrivileges = r.NoNewPrivileges
+	for _, rl := range r.Rlimits {
+		process.Rlimits =
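+		// Every RPC in these server files follows one supervisor round
+		// trip: build a task, hand it to the supervisor, then block on
+		// the task's error channel (and, for start-style tasks, on its
+		// StartResponse channel). A minimal sketch of that pattern, with
+		// an illustrative task and supervisor variable:
+		//
+		//	t := &supervisor.SignalTask{}  // any task kind works the same way
+		//	t.ID = "some-container"
+		//	sv.SendTask(t)                 // enqueue for the supervisor loop
+		//	if err := <-t.ErrorCh(); err != nil {
+		//		// the task failed inside the supervisor
+		//	}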
append(process.Rlimits, ocs.Rlimit{ + Type: rl.Type, + Soft: rl.Soft, + Hard: rl.Hard, + }) + } + if r.Id == "" { + return nil, fmt.Errorf("container id cannot be empty") + } + if r.Pid == "" { + return nil, fmt.Errorf("process id cannot be empty") + } + e := &supervisor.AddProcessTask{} + e.ID = r.Id + e.PID = r.Pid + e.ProcessSpec = process + e.Stdin = r.Stdin + e.Stdout = r.Stdout + e.Stderr = r.Stderr + e.StartResponse = make(chan supervisor.StartResponse, 1) + s.sv.SendTask(e) + if err := <-e.ErrorCh(); err != nil { + return nil, err + } + <-e.StartResponse + return &types.AddProcessResponse{}, nil +} diff --git a/vendor/github.com/docker/containerd/api/grpc/server/server_solaris.go b/vendor/github.com/docker/containerd/api/grpc/server/server_solaris.go new file mode 100644 index 00000000..bf64b9a2 --- /dev/null +++ b/vendor/github.com/docker/containerd/api/grpc/server/server_solaris.go @@ -0,0 +1,14 @@ +package server + +import ( + "errors" + + "github.com/docker/containerd/api/grpc/types" + "golang.org/x/net/context" +) + +var clockTicksPerSecond uint64 + +func (s *apiServer) AddProcess(ctx context.Context, r *types.AddProcessRequest) (*types.AddProcessResponse, error) { + return &types.AddProcessResponse{}, errors.New("apiServer AddProcess() not implemented on Solaris") +} diff --git a/vendor/github.com/docker/containerd/api/grpc/types/api.pb.go b/vendor/github.com/docker/containerd/api/grpc/types/api.pb.go new file mode 100644 index 00000000..52c6dcbe --- /dev/null +++ b/vendor/github.com/docker/containerd/api/grpc/types/api.pb.go @@ -0,0 +1,1438 @@ +// Code generated by protoc-gen-go. +// source: api.proto +// DO NOT EDIT! + +/* +Package types is a generated protocol buffer package. + +It is generated from these files: + api.proto + +It has these top-level messages: + GetServerVersionRequest + GetServerVersionResponse + UpdateProcessRequest + UpdateProcessResponse + CreateContainerRequest + CreateContainerResponse + SignalRequest + SignalResponse + AddProcessRequest + Rlimit + User + AddProcessResponse + CreateCheckpointRequest + CreateCheckpointResponse + DeleteCheckpointRequest + DeleteCheckpointResponse + ListCheckpointRequest + Checkpoint + ListCheckpointResponse + StateRequest + ContainerState + Process + Container + Machine + StateResponse + UpdateContainerRequest + UpdateResource + UpdateContainerResponse + EventsRequest + Event + NetworkStats + CpuUsage + ThrottlingData + CpuStats + PidsStats + MemoryData + MemoryStats + BlkioStatsEntry + BlkioStats + HugetlbStats + CgroupStats + StatsResponse + StatsRequest +*/ +package types + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
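+// A minimal sketch of consuming this generated API from Go. The socket
+// path is hypothetical, and the net/time imports are elided; WithDialer
+// is used because the daemon listens on a unix socket rather than TCP:
+//
+//	conn, err := grpc.Dial("/run/containerd/containerd.sock",
+//		grpc.WithInsecure(),
+//		grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) {
+//			return net.DialTimeout("unix", addr, timeout)
+//		}))
+//	if err != nil {
+//		// handle dial error
+//	}
+//	defer conn.Close()
+//	client := NewAPIClient(conn)
+//	v, err := client.GetServerVersion(context.Background(), &GetServerVersionRequest{})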
+const _ = proto.ProtoPackageIsVersion1 + +type GetServerVersionRequest struct { +} + +func (m *GetServerVersionRequest) Reset() { *m = GetServerVersionRequest{} } +func (m *GetServerVersionRequest) String() string { return proto.CompactTextString(m) } +func (*GetServerVersionRequest) ProtoMessage() {} +func (*GetServerVersionRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +type GetServerVersionResponse struct { + Major uint32 `protobuf:"varint,1,opt,name=major" json:"major,omitempty"` + Minor uint32 `protobuf:"varint,2,opt,name=minor" json:"minor,omitempty"` + Patch uint32 `protobuf:"varint,3,opt,name=patch" json:"patch,omitempty"` + Revision string `protobuf:"bytes,4,opt,name=revision" json:"revision,omitempty"` +} + +func (m *GetServerVersionResponse) Reset() { *m = GetServerVersionResponse{} } +func (m *GetServerVersionResponse) String() string { return proto.CompactTextString(m) } +func (*GetServerVersionResponse) ProtoMessage() {} +func (*GetServerVersionResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +type UpdateProcessRequest struct { + Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` + Pid string `protobuf:"bytes,2,opt,name=pid" json:"pid,omitempty"` + CloseStdin bool `protobuf:"varint,3,opt,name=closeStdin" json:"closeStdin,omitempty"` + Width uint32 `protobuf:"varint,4,opt,name=width" json:"width,omitempty"` + Height uint32 `protobuf:"varint,5,opt,name=height" json:"height,omitempty"` +} + +func (m *UpdateProcessRequest) Reset() { *m = UpdateProcessRequest{} } +func (m *UpdateProcessRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateProcessRequest) ProtoMessage() {} +func (*UpdateProcessRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +type UpdateProcessResponse struct { +} + +func (m *UpdateProcessResponse) Reset() { *m = UpdateProcessResponse{} } +func (m *UpdateProcessResponse) String() string { return proto.CompactTextString(m) } +func (*UpdateProcessResponse) ProtoMessage() {} +func (*UpdateProcessResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +type CreateContainerRequest struct { + Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` + BundlePath string `protobuf:"bytes,2,opt,name=bundlePath" json:"bundlePath,omitempty"` + Checkpoint string `protobuf:"bytes,3,opt,name=checkpoint" json:"checkpoint,omitempty"` + Stdin string `protobuf:"bytes,4,opt,name=stdin" json:"stdin,omitempty"` + Stdout string `protobuf:"bytes,5,opt,name=stdout" json:"stdout,omitempty"` + Stderr string `protobuf:"bytes,6,opt,name=stderr" json:"stderr,omitempty"` + Labels []string `protobuf:"bytes,7,rep,name=labels" json:"labels,omitempty"` + NoPivotRoot bool `protobuf:"varint,8,opt,name=noPivotRoot" json:"noPivotRoot,omitempty"` + Runtime string `protobuf:"bytes,9,opt,name=runtime" json:"runtime,omitempty"` + RuntimeArgs []string `protobuf:"bytes,10,rep,name=runtimeArgs" json:"runtimeArgs,omitempty"` + CheckpointDir string `protobuf:"bytes,11,opt,name=checkpointDir" json:"checkpointDir,omitempty"` +} + +func (m *CreateContainerRequest) Reset() { *m = CreateContainerRequest{} } +func (m *CreateContainerRequest) String() string { return proto.CompactTextString(m) } +func (*CreateContainerRequest) ProtoMessage() {} +func (*CreateContainerRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +type CreateContainerResponse struct { + Container *Container `protobuf:"bytes,1,opt,name=container" json:"container,omitempty"` 
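+	// The generated getters in this file (e.g. GetContainer below) guard
+	// against a nil receiver, so chained access is safe even when a
+	// response message is nil:
+	//
+	//	var resp *CreateContainerResponse
+	//	_ = resp.GetContainer() // returns nil instead of panicking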
+} + +func (m *CreateContainerResponse) Reset() { *m = CreateContainerResponse{} } +func (m *CreateContainerResponse) String() string { return proto.CompactTextString(m) } +func (*CreateContainerResponse) ProtoMessage() {} +func (*CreateContainerResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *CreateContainerResponse) GetContainer() *Container { + if m != nil { + return m.Container + } + return nil +} + +type SignalRequest struct { + Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` + Pid string `protobuf:"bytes,2,opt,name=pid" json:"pid,omitempty"` + Signal uint32 `protobuf:"varint,3,opt,name=signal" json:"signal,omitempty"` +} + +func (m *SignalRequest) Reset() { *m = SignalRequest{} } +func (m *SignalRequest) String() string { return proto.CompactTextString(m) } +func (*SignalRequest) ProtoMessage() {} +func (*SignalRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +type SignalResponse struct { +} + +func (m *SignalResponse) Reset() { *m = SignalResponse{} } +func (m *SignalResponse) String() string { return proto.CompactTextString(m) } +func (*SignalResponse) ProtoMessage() {} +func (*SignalResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +type AddProcessRequest struct { + Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` + Terminal bool `protobuf:"varint,2,opt,name=terminal" json:"terminal,omitempty"` + User *User `protobuf:"bytes,3,opt,name=user" json:"user,omitempty"` + Args []string `protobuf:"bytes,4,rep,name=args" json:"args,omitempty"` + Env []string `protobuf:"bytes,5,rep,name=env" json:"env,omitempty"` + Cwd string `protobuf:"bytes,6,opt,name=cwd" json:"cwd,omitempty"` + Pid string `protobuf:"bytes,7,opt,name=pid" json:"pid,omitempty"` + Stdin string `protobuf:"bytes,8,opt,name=stdin" json:"stdin,omitempty"` + Stdout string `protobuf:"bytes,9,opt,name=stdout" json:"stdout,omitempty"` + Stderr string `protobuf:"bytes,10,opt,name=stderr" json:"stderr,omitempty"` + Capabilities []string `protobuf:"bytes,11,rep,name=capabilities" json:"capabilities,omitempty"` + ApparmorProfile string `protobuf:"bytes,12,opt,name=apparmorProfile" json:"apparmorProfile,omitempty"` + SelinuxLabel string `protobuf:"bytes,13,opt,name=selinuxLabel" json:"selinuxLabel,omitempty"` + NoNewPrivileges bool `protobuf:"varint,14,opt,name=noNewPrivileges" json:"noNewPrivileges,omitempty"` + Rlimits []*Rlimit `protobuf:"bytes,15,rep,name=rlimits" json:"rlimits,omitempty"` +} + +func (m *AddProcessRequest) Reset() { *m = AddProcessRequest{} } +func (m *AddProcessRequest) String() string { return proto.CompactTextString(m) } +func (*AddProcessRequest) ProtoMessage() {} +func (*AddProcessRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } + +func (m *AddProcessRequest) GetUser() *User { + if m != nil { + return m.User + } + return nil +} + +func (m *AddProcessRequest) GetRlimits() []*Rlimit { + if m != nil { + return m.Rlimits + } + return nil +} + +type Rlimit struct { + Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` + Soft uint64 `protobuf:"varint,2,opt,name=soft" json:"soft,omitempty"` + Hard uint64 `protobuf:"varint,3,opt,name=hard" json:"hard,omitempty"` +} + +func (m *Rlimit) Reset() { *m = Rlimit{} } +func (m *Rlimit) String() string { return proto.CompactTextString(m) } +func (*Rlimit) ProtoMessage() {} +func (*Rlimit) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } + +type User struct { + Uid uint32 
`protobuf:"varint,1,opt,name=uid" json:"uid,omitempty"` + Gid uint32 `protobuf:"varint,2,opt,name=gid" json:"gid,omitempty"` + AdditionalGids []uint32 `protobuf:"varint,3,rep,name=additionalGids" json:"additionalGids,omitempty"` +} + +func (m *User) Reset() { *m = User{} } +func (m *User) String() string { return proto.CompactTextString(m) } +func (*User) ProtoMessage() {} +func (*User) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } + +type AddProcessResponse struct { +} + +func (m *AddProcessResponse) Reset() { *m = AddProcessResponse{} } +func (m *AddProcessResponse) String() string { return proto.CompactTextString(m) } +func (*AddProcessResponse) ProtoMessage() {} +func (*AddProcessResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } + +type CreateCheckpointRequest struct { + Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` + Checkpoint *Checkpoint `protobuf:"bytes,2,opt,name=checkpoint" json:"checkpoint,omitempty"` + CheckpointDir string `protobuf:"bytes,3,opt,name=checkpointDir" json:"checkpointDir,omitempty"` +} + +func (m *CreateCheckpointRequest) Reset() { *m = CreateCheckpointRequest{} } +func (m *CreateCheckpointRequest) String() string { return proto.CompactTextString(m) } +func (*CreateCheckpointRequest) ProtoMessage() {} +func (*CreateCheckpointRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } + +func (m *CreateCheckpointRequest) GetCheckpoint() *Checkpoint { + if m != nil { + return m.Checkpoint + } + return nil +} + +type CreateCheckpointResponse struct { +} + +func (m *CreateCheckpointResponse) Reset() { *m = CreateCheckpointResponse{} } +func (m *CreateCheckpointResponse) String() string { return proto.CompactTextString(m) } +func (*CreateCheckpointResponse) ProtoMessage() {} +func (*CreateCheckpointResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } + +type DeleteCheckpointRequest struct { + Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` + Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` + CheckpointDir string `protobuf:"bytes,3,opt,name=checkpointDir" json:"checkpointDir,omitempty"` +} + +func (m *DeleteCheckpointRequest) Reset() { *m = DeleteCheckpointRequest{} } +func (m *DeleteCheckpointRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteCheckpointRequest) ProtoMessage() {} +func (*DeleteCheckpointRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } + +type DeleteCheckpointResponse struct { +} + +func (m *DeleteCheckpointResponse) Reset() { *m = DeleteCheckpointResponse{} } +func (m *DeleteCheckpointResponse) String() string { return proto.CompactTextString(m) } +func (*DeleteCheckpointResponse) ProtoMessage() {} +func (*DeleteCheckpointResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } + +type ListCheckpointRequest struct { + Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` + CheckpointDir string `protobuf:"bytes,2,opt,name=checkpointDir" json:"checkpointDir,omitempty"` +} + +func (m *ListCheckpointRequest) Reset() { *m = ListCheckpointRequest{} } +func (m *ListCheckpointRequest) String() string { return proto.CompactTextString(m) } +func (*ListCheckpointRequest) ProtoMessage() {} +func (*ListCheckpointRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } + +type Checkpoint struct { + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Exit bool `protobuf:"varint,2,opt,name=exit" 
json:"exit,omitempty"` + Tcp bool `protobuf:"varint,3,opt,name=tcp" json:"tcp,omitempty"` + UnixSockets bool `protobuf:"varint,4,opt,name=unixSockets" json:"unixSockets,omitempty"` + Shell bool `protobuf:"varint,5,opt,name=shell" json:"shell,omitempty"` +} + +func (m *Checkpoint) Reset() { *m = Checkpoint{} } +func (m *Checkpoint) String() string { return proto.CompactTextString(m) } +func (*Checkpoint) ProtoMessage() {} +func (*Checkpoint) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } + +type ListCheckpointResponse struct { + Checkpoints []*Checkpoint `protobuf:"bytes,1,rep,name=checkpoints" json:"checkpoints,omitempty"` +} + +func (m *ListCheckpointResponse) Reset() { *m = ListCheckpointResponse{} } +func (m *ListCheckpointResponse) String() string { return proto.CompactTextString(m) } +func (*ListCheckpointResponse) ProtoMessage() {} +func (*ListCheckpointResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } + +func (m *ListCheckpointResponse) GetCheckpoints() []*Checkpoint { + if m != nil { + return m.Checkpoints + } + return nil +} + +type StateRequest struct { + Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` +} + +func (m *StateRequest) Reset() { *m = StateRequest{} } +func (m *StateRequest) String() string { return proto.CompactTextString(m) } +func (*StateRequest) ProtoMessage() {} +func (*StateRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} } + +type ContainerState struct { + Status string `protobuf:"bytes,1,opt,name=status" json:"status,omitempty"` +} + +func (m *ContainerState) Reset() { *m = ContainerState{} } +func (m *ContainerState) String() string { return proto.CompactTextString(m) } +func (*ContainerState) ProtoMessage() {} +func (*ContainerState) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} } + +type Process struct { + Pid string `protobuf:"bytes,1,opt,name=pid" json:"pid,omitempty"` + Terminal bool `protobuf:"varint,2,opt,name=terminal" json:"terminal,omitempty"` + User *User `protobuf:"bytes,3,opt,name=user" json:"user,omitempty"` + Args []string `protobuf:"bytes,4,rep,name=args" json:"args,omitempty"` + Env []string `protobuf:"bytes,5,rep,name=env" json:"env,omitempty"` + Cwd string `protobuf:"bytes,6,opt,name=cwd" json:"cwd,omitempty"` + SystemPid uint32 `protobuf:"varint,7,opt,name=systemPid" json:"systemPid,omitempty"` + Stdin string `protobuf:"bytes,8,opt,name=stdin" json:"stdin,omitempty"` + Stdout string `protobuf:"bytes,9,opt,name=stdout" json:"stdout,omitempty"` + Stderr string `protobuf:"bytes,10,opt,name=stderr" json:"stderr,omitempty"` + Capabilities []string `protobuf:"bytes,11,rep,name=capabilities" json:"capabilities,omitempty"` + ApparmorProfile string `protobuf:"bytes,12,opt,name=apparmorProfile" json:"apparmorProfile,omitempty"` + SelinuxLabel string `protobuf:"bytes,13,opt,name=selinuxLabel" json:"selinuxLabel,omitempty"` + NoNewPrivileges bool `protobuf:"varint,14,opt,name=noNewPrivileges" json:"noNewPrivileges,omitempty"` + Rlimits []*Rlimit `protobuf:"bytes,15,rep,name=rlimits" json:"rlimits,omitempty"` +} + +func (m *Process) Reset() { *m = Process{} } +func (m *Process) String() string { return proto.CompactTextString(m) } +func (*Process) ProtoMessage() {} +func (*Process) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} } + +func (m *Process) GetUser() *User { + if m != nil { + return m.User + } + return nil +} + +func (m *Process) GetRlimits() []*Rlimit { + if m != nil { + return m.Rlimits + } + return nil +} + +type 
Container struct { + Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` + BundlePath string `protobuf:"bytes,2,opt,name=bundlePath" json:"bundlePath,omitempty"` + Processes []*Process `protobuf:"bytes,3,rep,name=processes" json:"processes,omitempty"` + Status string `protobuf:"bytes,4,opt,name=status" json:"status,omitempty"` + Labels []string `protobuf:"bytes,5,rep,name=labels" json:"labels,omitempty"` + Pids []uint32 `protobuf:"varint,6,rep,name=pids" json:"pids,omitempty"` + Runtime string `protobuf:"bytes,7,opt,name=runtime" json:"runtime,omitempty"` +} + +func (m *Container) Reset() { *m = Container{} } +func (m *Container) String() string { return proto.CompactTextString(m) } +func (*Container) ProtoMessage() {} +func (*Container) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} } + +func (m *Container) GetProcesses() []*Process { + if m != nil { + return m.Processes + } + return nil +} + +// Machine is information about machine on which containerd is run +type Machine struct { + Cpus uint32 `protobuf:"varint,1,opt,name=cpus" json:"cpus,omitempty"` + Memory uint64 `protobuf:"varint,2,opt,name=memory" json:"memory,omitempty"` +} + +func (m *Machine) Reset() { *m = Machine{} } +func (m *Machine) String() string { return proto.CompactTextString(m) } +func (*Machine) ProtoMessage() {} +func (*Machine) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} } + +// StateResponse is information about containerd daemon +type StateResponse struct { + Containers []*Container `protobuf:"bytes,1,rep,name=containers" json:"containers,omitempty"` + Machine *Machine `protobuf:"bytes,2,opt,name=machine" json:"machine,omitempty"` +} + +func (m *StateResponse) Reset() { *m = StateResponse{} } +func (m *StateResponse) String() string { return proto.CompactTextString(m) } +func (*StateResponse) ProtoMessage() {} +func (*StateResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} } + +func (m *StateResponse) GetContainers() []*Container { + if m != nil { + return m.Containers + } + return nil +} + +func (m *StateResponse) GetMachine() *Machine { + if m != nil { + return m.Machine + } + return nil +} + +type UpdateContainerRequest struct { + Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` + Pid string `protobuf:"bytes,2,opt,name=pid" json:"pid,omitempty"` + Status string `protobuf:"bytes,3,opt,name=status" json:"status,omitempty"` + Resources *UpdateResource `protobuf:"bytes,4,opt,name=resources" json:"resources,omitempty"` +} + +func (m *UpdateContainerRequest) Reset() { *m = UpdateContainerRequest{} } +func (m *UpdateContainerRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateContainerRequest) ProtoMessage() {} +func (*UpdateContainerRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} } + +func (m *UpdateContainerRequest) GetResources() *UpdateResource { + if m != nil { + return m.Resources + } + return nil +} + +type UpdateResource struct { + BlkioWeight uint32 `protobuf:"varint,1,opt,name=blkioWeight" json:"blkioWeight,omitempty"` + CpuShares uint32 `protobuf:"varint,2,opt,name=cpuShares" json:"cpuShares,omitempty"` + CpuPeriod uint32 `protobuf:"varint,3,opt,name=cpuPeriod" json:"cpuPeriod,omitempty"` + CpuQuota uint32 `protobuf:"varint,4,opt,name=cpuQuota" json:"cpuQuota,omitempty"` + CpusetCpus string `protobuf:"bytes,5,opt,name=cpusetCpus" json:"cpusetCpus,omitempty"` + CpusetMems string `protobuf:"bytes,6,opt,name=cpusetMems" json:"cpusetMems,omitempty"` + MemoryLimit uint32 
`protobuf:"varint,7,opt,name=memoryLimit" json:"memoryLimit,omitempty"` + MemorySwap uint32 `protobuf:"varint,8,opt,name=memorySwap" json:"memorySwap,omitempty"` + MemoryReservation uint32 `protobuf:"varint,9,opt,name=memoryReservation" json:"memoryReservation,omitempty"` + KernelMemoryLimit uint32 `protobuf:"varint,10,opt,name=kernelMemoryLimit" json:"kernelMemoryLimit,omitempty"` +} + +func (m *UpdateResource) Reset() { *m = UpdateResource{} } +func (m *UpdateResource) String() string { return proto.CompactTextString(m) } +func (*UpdateResource) ProtoMessage() {} +func (*UpdateResource) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26} } + +type UpdateContainerResponse struct { +} + +func (m *UpdateContainerResponse) Reset() { *m = UpdateContainerResponse{} } +func (m *UpdateContainerResponse) String() string { return proto.CompactTextString(m) } +func (*UpdateContainerResponse) ProtoMessage() {} +func (*UpdateContainerResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{27} } + +type EventsRequest struct { + Timestamp uint64 `protobuf:"varint,1,opt,name=timestamp" json:"timestamp,omitempty"` +} + +func (m *EventsRequest) Reset() { *m = EventsRequest{} } +func (m *EventsRequest) String() string { return proto.CompactTextString(m) } +func (*EventsRequest) ProtoMessage() {} +func (*EventsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} } + +type Event struct { + Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` + Id string `protobuf:"bytes,2,opt,name=id" json:"id,omitempty"` + Status uint32 `protobuf:"varint,3,opt,name=status" json:"status,omitempty"` + Pid string `protobuf:"bytes,4,opt,name=pid" json:"pid,omitempty"` + Timestamp uint64 `protobuf:"varint,5,opt,name=timestamp" json:"timestamp,omitempty"` +} + +func (m *Event) Reset() { *m = Event{} } +func (m *Event) String() string { return proto.CompactTextString(m) } +func (*Event) ProtoMessage() {} +func (*Event) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} } + +type NetworkStats struct { + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + RxBytes uint64 `protobuf:"varint,2,opt,name=rx_bytes" json:"rx_bytes,omitempty"` + Rx_Packets uint64 `protobuf:"varint,3,opt,name=rx_Packets" json:"rx_Packets,omitempty"` + RxErrors uint64 `protobuf:"varint,4,opt,name=Rx_errors" json:"Rx_errors,omitempty"` + RxDropped uint64 `protobuf:"varint,5,opt,name=Rx_dropped" json:"Rx_dropped,omitempty"` + TxBytes uint64 `protobuf:"varint,6,opt,name=Tx_bytes" json:"Tx_bytes,omitempty"` + TxPackets uint64 `protobuf:"varint,7,opt,name=Tx_packets" json:"Tx_packets,omitempty"` + TxErrors uint64 `protobuf:"varint,8,opt,name=Tx_errors" json:"Tx_errors,omitempty"` + TxDropped uint64 `protobuf:"varint,9,opt,name=Tx_dropped" json:"Tx_dropped,omitempty"` +} + +func (m *NetworkStats) Reset() { *m = NetworkStats{} } +func (m *NetworkStats) String() string { return proto.CompactTextString(m) } +func (*NetworkStats) ProtoMessage() {} +func (*NetworkStats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{30} } + +type CpuUsage struct { + TotalUsage uint64 `protobuf:"varint,1,opt,name=total_usage" json:"total_usage,omitempty"` + PercpuUsage []uint64 `protobuf:"varint,2,rep,name=percpu_usage" json:"percpu_usage,omitempty"` + UsageInKernelmode uint64 `protobuf:"varint,3,opt,name=usage_in_kernelmode" json:"usage_in_kernelmode,omitempty"` + UsageInUsermode uint64 `protobuf:"varint,4,opt,name=usage_in_usermode" json:"usage_in_usermode,omitempty"` +} + 
+func (m *CpuUsage) Reset() { *m = CpuUsage{} } +func (m *CpuUsage) String() string { return proto.CompactTextString(m) } +func (*CpuUsage) ProtoMessage() {} +func (*CpuUsage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{31} } + +type ThrottlingData struct { + Periods uint64 `protobuf:"varint,1,opt,name=periods" json:"periods,omitempty"` + ThrottledPeriods uint64 `protobuf:"varint,2,opt,name=throttled_periods" json:"throttled_periods,omitempty"` + ThrottledTime uint64 `protobuf:"varint,3,opt,name=throttled_time" json:"throttled_time,omitempty"` +} + +func (m *ThrottlingData) Reset() { *m = ThrottlingData{} } +func (m *ThrottlingData) String() string { return proto.CompactTextString(m) } +func (*ThrottlingData) ProtoMessage() {} +func (*ThrottlingData) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{32} } + +type CpuStats struct { + CpuUsage *CpuUsage `protobuf:"bytes,1,opt,name=cpu_usage" json:"cpu_usage,omitempty"` + ThrottlingData *ThrottlingData `protobuf:"bytes,2,opt,name=throttling_data" json:"throttling_data,omitempty"` + SystemUsage uint64 `protobuf:"varint,3,opt,name=system_usage" json:"system_usage,omitempty"` +} + +func (m *CpuStats) Reset() { *m = CpuStats{} } +func (m *CpuStats) String() string { return proto.CompactTextString(m) } +func (*CpuStats) ProtoMessage() {} +func (*CpuStats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{33} } + +func (m *CpuStats) GetCpuUsage() *CpuUsage { + if m != nil { + return m.CpuUsage + } + return nil +} + +func (m *CpuStats) GetThrottlingData() *ThrottlingData { + if m != nil { + return m.ThrottlingData + } + return nil +} + +type PidsStats struct { + Current uint64 `protobuf:"varint,1,opt,name=current" json:"current,omitempty"` + Limit uint64 `protobuf:"varint,2,opt,name=limit" json:"limit,omitempty"` +} + +func (m *PidsStats) Reset() { *m = PidsStats{} } +func (m *PidsStats) String() string { return proto.CompactTextString(m) } +func (*PidsStats) ProtoMessage() {} +func (*PidsStats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{34} } + +type MemoryData struct { + Usage uint64 `protobuf:"varint,1,opt,name=usage" json:"usage,omitempty"` + MaxUsage uint64 `protobuf:"varint,2,opt,name=max_usage" json:"max_usage,omitempty"` + Failcnt uint64 `protobuf:"varint,3,opt,name=failcnt" json:"failcnt,omitempty"` + Limit uint64 `protobuf:"varint,4,opt,name=limit" json:"limit,omitempty"` +} + +func (m *MemoryData) Reset() { *m = MemoryData{} } +func (m *MemoryData) String() string { return proto.CompactTextString(m) } +func (*MemoryData) ProtoMessage() {} +func (*MemoryData) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{35} } + +type MemoryStats struct { + Cache uint64 `protobuf:"varint,1,opt,name=cache" json:"cache,omitempty"` + Usage *MemoryData `protobuf:"bytes,2,opt,name=usage" json:"usage,omitempty"` + SwapUsage *MemoryData `protobuf:"bytes,3,opt,name=swap_usage" json:"swap_usage,omitempty"` + KernelUsage *MemoryData `protobuf:"bytes,4,opt,name=kernel_usage" json:"kernel_usage,omitempty"` + Stats map[string]uint64 `protobuf:"bytes,5,rep,name=stats" json:"stats,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` +} + +func (m *MemoryStats) Reset() { *m = MemoryStats{} } +func (m *MemoryStats) String() string { return proto.CompactTextString(m) } +func (*MemoryStats) ProtoMessage() {} +func (*MemoryStats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{36} } + +func (m *MemoryStats) GetUsage() *MemoryData { + if m != nil { + return 
m.Usage + } + return nil +} + +func (m *MemoryStats) GetSwapUsage() *MemoryData { + if m != nil { + return m.SwapUsage + } + return nil +} + +func (m *MemoryStats) GetKernelUsage() *MemoryData { + if m != nil { + return m.KernelUsage + } + return nil +} + +func (m *MemoryStats) GetStats() map[string]uint64 { + if m != nil { + return m.Stats + } + return nil +} + +type BlkioStatsEntry struct { + Major uint64 `protobuf:"varint,1,opt,name=major" json:"major,omitempty"` + Minor uint64 `protobuf:"varint,2,opt,name=minor" json:"minor,omitempty"` + Op string `protobuf:"bytes,3,opt,name=op" json:"op,omitempty"` + Value uint64 `protobuf:"varint,4,opt,name=value" json:"value,omitempty"` +} + +func (m *BlkioStatsEntry) Reset() { *m = BlkioStatsEntry{} } +func (m *BlkioStatsEntry) String() string { return proto.CompactTextString(m) } +func (*BlkioStatsEntry) ProtoMessage() {} +func (*BlkioStatsEntry) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{37} } + +type BlkioStats struct { + IoServiceBytesRecursive []*BlkioStatsEntry `protobuf:"bytes,1,rep,name=io_service_bytes_recursive" json:"io_service_bytes_recursive,omitempty"` + IoServicedRecursive []*BlkioStatsEntry `protobuf:"bytes,2,rep,name=io_serviced_recursive" json:"io_serviced_recursive,omitempty"` + IoQueuedRecursive []*BlkioStatsEntry `protobuf:"bytes,3,rep,name=io_queued_recursive" json:"io_queued_recursive,omitempty"` + IoServiceTimeRecursive []*BlkioStatsEntry `protobuf:"bytes,4,rep,name=io_service_time_recursive" json:"io_service_time_recursive,omitempty"` + IoWaitTimeRecursive []*BlkioStatsEntry `protobuf:"bytes,5,rep,name=io_wait_time_recursive" json:"io_wait_time_recursive,omitempty"` + IoMergedRecursive []*BlkioStatsEntry `protobuf:"bytes,6,rep,name=io_merged_recursive" json:"io_merged_recursive,omitempty"` + IoTimeRecursive []*BlkioStatsEntry `protobuf:"bytes,7,rep,name=io_time_recursive" json:"io_time_recursive,omitempty"` + SectorsRecursive []*BlkioStatsEntry `protobuf:"bytes,8,rep,name=sectors_recursive" json:"sectors_recursive,omitempty"` +} + +func (m *BlkioStats) Reset() { *m = BlkioStats{} } +func (m *BlkioStats) String() string { return proto.CompactTextString(m) } +func (*BlkioStats) ProtoMessage() {} +func (*BlkioStats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{38} } + +func (m *BlkioStats) GetIoServiceBytesRecursive() []*BlkioStatsEntry { + if m != nil { + return m.IoServiceBytesRecursive + } + return nil +} + +func (m *BlkioStats) GetIoServicedRecursive() []*BlkioStatsEntry { + if m != nil { + return m.IoServicedRecursive + } + return nil +} + +func (m *BlkioStats) GetIoQueuedRecursive() []*BlkioStatsEntry { + if m != nil { + return m.IoQueuedRecursive + } + return nil +} + +func (m *BlkioStats) GetIoServiceTimeRecursive() []*BlkioStatsEntry { + if m != nil { + return m.IoServiceTimeRecursive + } + return nil +} + +func (m *BlkioStats) GetIoWaitTimeRecursive() []*BlkioStatsEntry { + if m != nil { + return m.IoWaitTimeRecursive + } + return nil +} + +func (m *BlkioStats) GetIoMergedRecursive() []*BlkioStatsEntry { + if m != nil { + return m.IoMergedRecursive + } + return nil +} + +func (m *BlkioStats) GetIoTimeRecursive() []*BlkioStatsEntry { + if m != nil { + return m.IoTimeRecursive + } + return nil +} + +func (m *BlkioStats) GetSectorsRecursive() []*BlkioStatsEntry { + if m != nil { + return m.SectorsRecursive + } + return nil +} + +type HugetlbStats struct { + Usage uint64 `protobuf:"varint,1,opt,name=usage" json:"usage,omitempty"` + MaxUsage uint64 `protobuf:"varint,2,opt,name=max_usage" 
json:"max_usage,omitempty"` + Failcnt uint64 `protobuf:"varint,3,opt,name=failcnt" json:"failcnt,omitempty"` + Limit uint64 `protobuf:"varint,4,opt,name=limit" json:"limit,omitempty"` +} + +func (m *HugetlbStats) Reset() { *m = HugetlbStats{} } +func (m *HugetlbStats) String() string { return proto.CompactTextString(m) } +func (*HugetlbStats) ProtoMessage() {} +func (*HugetlbStats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{39} } + +type CgroupStats struct { + CpuStats *CpuStats `protobuf:"bytes,1,opt,name=cpu_stats" json:"cpu_stats,omitempty"` + MemoryStats *MemoryStats `protobuf:"bytes,2,opt,name=memory_stats" json:"memory_stats,omitempty"` + BlkioStats *BlkioStats `protobuf:"bytes,3,opt,name=blkio_stats" json:"blkio_stats,omitempty"` + HugetlbStats map[string]*HugetlbStats `protobuf:"bytes,4,rep,name=hugetlb_stats" json:"hugetlb_stats,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + PidsStats *PidsStats `protobuf:"bytes,5,opt,name=pids_stats" json:"pids_stats,omitempty"` +} + +func (m *CgroupStats) Reset() { *m = CgroupStats{} } +func (m *CgroupStats) String() string { return proto.CompactTextString(m) } +func (*CgroupStats) ProtoMessage() {} +func (*CgroupStats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{40} } + +func (m *CgroupStats) GetCpuStats() *CpuStats { + if m != nil { + return m.CpuStats + } + return nil +} + +func (m *CgroupStats) GetMemoryStats() *MemoryStats { + if m != nil { + return m.MemoryStats + } + return nil +} + +func (m *CgroupStats) GetBlkioStats() *BlkioStats { + if m != nil { + return m.BlkioStats + } + return nil +} + +func (m *CgroupStats) GetHugetlbStats() map[string]*HugetlbStats { + if m != nil { + return m.HugetlbStats + } + return nil +} + +func (m *CgroupStats) GetPidsStats() *PidsStats { + if m != nil { + return m.PidsStats + } + return nil +} + +type StatsResponse struct { + NetworkStats []*NetworkStats `protobuf:"bytes,1,rep,name=network_stats" json:"network_stats,omitempty"` + CgroupStats *CgroupStats `protobuf:"bytes,2,opt,name=cgroup_stats" json:"cgroup_stats,omitempty"` + Timestamp uint64 `protobuf:"varint,3,opt,name=timestamp" json:"timestamp,omitempty"` +} + +func (m *StatsResponse) Reset() { *m = StatsResponse{} } +func (m *StatsResponse) String() string { return proto.CompactTextString(m) } +func (*StatsResponse) ProtoMessage() {} +func (*StatsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{41} } + +func (m *StatsResponse) GetNetworkStats() []*NetworkStats { + if m != nil { + return m.NetworkStats + } + return nil +} + +func (m *StatsResponse) GetCgroupStats() *CgroupStats { + if m != nil { + return m.CgroupStats + } + return nil +} + +type StatsRequest struct { + Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` +} + +func (m *StatsRequest) Reset() { *m = StatsRequest{} } +func (m *StatsRequest) String() string { return proto.CompactTextString(m) } +func (*StatsRequest) ProtoMessage() {} +func (*StatsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{42} } + +func init() { + proto.RegisterType((*GetServerVersionRequest)(nil), "types.GetServerVersionRequest") + proto.RegisterType((*GetServerVersionResponse)(nil), "types.GetServerVersionResponse") + proto.RegisterType((*UpdateProcessRequest)(nil), "types.UpdateProcessRequest") + proto.RegisterType((*UpdateProcessResponse)(nil), "types.UpdateProcessResponse") + proto.RegisterType((*CreateContainerRequest)(nil), "types.CreateContainerRequest") + 
proto.RegisterType((*CreateContainerResponse)(nil), "types.CreateContainerResponse") + proto.RegisterType((*SignalRequest)(nil), "types.SignalRequest") + proto.RegisterType((*SignalResponse)(nil), "types.SignalResponse") + proto.RegisterType((*AddProcessRequest)(nil), "types.AddProcessRequest") + proto.RegisterType((*Rlimit)(nil), "types.Rlimit") + proto.RegisterType((*User)(nil), "types.User") + proto.RegisterType((*AddProcessResponse)(nil), "types.AddProcessResponse") + proto.RegisterType((*CreateCheckpointRequest)(nil), "types.CreateCheckpointRequest") + proto.RegisterType((*CreateCheckpointResponse)(nil), "types.CreateCheckpointResponse") + proto.RegisterType((*DeleteCheckpointRequest)(nil), "types.DeleteCheckpointRequest") + proto.RegisterType((*DeleteCheckpointResponse)(nil), "types.DeleteCheckpointResponse") + proto.RegisterType((*ListCheckpointRequest)(nil), "types.ListCheckpointRequest") + proto.RegisterType((*Checkpoint)(nil), "types.Checkpoint") + proto.RegisterType((*ListCheckpointResponse)(nil), "types.ListCheckpointResponse") + proto.RegisterType((*StateRequest)(nil), "types.StateRequest") + proto.RegisterType((*ContainerState)(nil), "types.ContainerState") + proto.RegisterType((*Process)(nil), "types.Process") + proto.RegisterType((*Container)(nil), "types.Container") + proto.RegisterType((*Machine)(nil), "types.Machine") + proto.RegisterType((*StateResponse)(nil), "types.StateResponse") + proto.RegisterType((*UpdateContainerRequest)(nil), "types.UpdateContainerRequest") + proto.RegisterType((*UpdateResource)(nil), "types.UpdateResource") + proto.RegisterType((*UpdateContainerResponse)(nil), "types.UpdateContainerResponse") + proto.RegisterType((*EventsRequest)(nil), "types.EventsRequest") + proto.RegisterType((*Event)(nil), "types.Event") + proto.RegisterType((*NetworkStats)(nil), "types.NetworkStats") + proto.RegisterType((*CpuUsage)(nil), "types.CpuUsage") + proto.RegisterType((*ThrottlingData)(nil), "types.ThrottlingData") + proto.RegisterType((*CpuStats)(nil), "types.CpuStats") + proto.RegisterType((*PidsStats)(nil), "types.PidsStats") + proto.RegisterType((*MemoryData)(nil), "types.MemoryData") + proto.RegisterType((*MemoryStats)(nil), "types.MemoryStats") + proto.RegisterType((*BlkioStatsEntry)(nil), "types.BlkioStatsEntry") + proto.RegisterType((*BlkioStats)(nil), "types.BlkioStats") + proto.RegisterType((*HugetlbStats)(nil), "types.HugetlbStats") + proto.RegisterType((*CgroupStats)(nil), "types.CgroupStats") + proto.RegisterType((*StatsResponse)(nil), "types.StatsResponse") + proto.RegisterType((*StatsRequest)(nil), "types.StatsRequest") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. 
+const _ = grpc.SupportPackageIsVersion2 + +// Client API for API service + +type APIClient interface { + GetServerVersion(ctx context.Context, in *GetServerVersionRequest, opts ...grpc.CallOption) (*GetServerVersionResponse, error) + CreateContainer(ctx context.Context, in *CreateContainerRequest, opts ...grpc.CallOption) (*CreateContainerResponse, error) + UpdateContainer(ctx context.Context, in *UpdateContainerRequest, opts ...grpc.CallOption) (*UpdateContainerResponse, error) + Signal(ctx context.Context, in *SignalRequest, opts ...grpc.CallOption) (*SignalResponse, error) + UpdateProcess(ctx context.Context, in *UpdateProcessRequest, opts ...grpc.CallOption) (*UpdateProcessResponse, error) + AddProcess(ctx context.Context, in *AddProcessRequest, opts ...grpc.CallOption) (*AddProcessResponse, error) + CreateCheckpoint(ctx context.Context, in *CreateCheckpointRequest, opts ...grpc.CallOption) (*CreateCheckpointResponse, error) + DeleteCheckpoint(ctx context.Context, in *DeleteCheckpointRequest, opts ...grpc.CallOption) (*DeleteCheckpointResponse, error) + ListCheckpoint(ctx context.Context, in *ListCheckpointRequest, opts ...grpc.CallOption) (*ListCheckpointResponse, error) + State(ctx context.Context, in *StateRequest, opts ...grpc.CallOption) (*StateResponse, error) + Events(ctx context.Context, in *EventsRequest, opts ...grpc.CallOption) (API_EventsClient, error) + Stats(ctx context.Context, in *StatsRequest, opts ...grpc.CallOption) (*StatsResponse, error) +} + +type aPIClient struct { + cc *grpc.ClientConn +} + +func NewAPIClient(cc *grpc.ClientConn) APIClient { + return &aPIClient{cc} +} + +func (c *aPIClient) GetServerVersion(ctx context.Context, in *GetServerVersionRequest, opts ...grpc.CallOption) (*GetServerVersionResponse, error) { + out := new(GetServerVersionResponse) + err := grpc.Invoke(ctx, "/types.API/GetServerVersion", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aPIClient) CreateContainer(ctx context.Context, in *CreateContainerRequest, opts ...grpc.CallOption) (*CreateContainerResponse, error) { + out := new(CreateContainerResponse) + err := grpc.Invoke(ctx, "/types.API/CreateContainer", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aPIClient) UpdateContainer(ctx context.Context, in *UpdateContainerRequest, opts ...grpc.CallOption) (*UpdateContainerResponse, error) { + out := new(UpdateContainerResponse) + err := grpc.Invoke(ctx, "/types.API/UpdateContainer", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aPIClient) Signal(ctx context.Context, in *SignalRequest, opts ...grpc.CallOption) (*SignalResponse, error) { + out := new(SignalResponse) + err := grpc.Invoke(ctx, "/types.API/Signal", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aPIClient) UpdateProcess(ctx context.Context, in *UpdateProcessRequest, opts ...grpc.CallOption) (*UpdateProcessResponse, error) { + out := new(UpdateProcessResponse) + err := grpc.Invoke(ctx, "/types.API/UpdateProcess", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aPIClient) AddProcess(ctx context.Context, in *AddProcessRequest, opts ...grpc.CallOption) (*AddProcessResponse, error) { + out := new(AddProcessResponse) + err := grpc.Invoke(ctx, "/types.API/AddProcess", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *aPIClient) CreateCheckpoint(ctx context.Context, in *CreateCheckpointRequest, opts ...grpc.CallOption) (*CreateCheckpointResponse, error) { + out := new(CreateCheckpointResponse) + err := grpc.Invoke(ctx, "/types.API/CreateCheckpoint", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aPIClient) DeleteCheckpoint(ctx context.Context, in *DeleteCheckpointRequest, opts ...grpc.CallOption) (*DeleteCheckpointResponse, error) { + out := new(DeleteCheckpointResponse) + err := grpc.Invoke(ctx, "/types.API/DeleteCheckpoint", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aPIClient) ListCheckpoint(ctx context.Context, in *ListCheckpointRequest, opts ...grpc.CallOption) (*ListCheckpointResponse, error) { + out := new(ListCheckpointResponse) + err := grpc.Invoke(ctx, "/types.API/ListCheckpoint", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aPIClient) State(ctx context.Context, in *StateRequest, opts ...grpc.CallOption) (*StateResponse, error) { + out := new(StateResponse) + err := grpc.Invoke(ctx, "/types.API/State", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aPIClient) Events(ctx context.Context, in *EventsRequest, opts ...grpc.CallOption) (API_EventsClient, error) { + stream, err := grpc.NewClientStream(ctx, &_API_serviceDesc.Streams[0], c.cc, "/types.API/Events", opts...) + if err != nil { + return nil, err + } + x := &aPIEventsClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type API_EventsClient interface { + Recv() (*Event, error) + grpc.ClientStream +} + +type aPIEventsClient struct { + grpc.ClientStream +} + +func (x *aPIEventsClient) Recv() (*Event, error) { + m := new(Event) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *aPIClient) Stats(ctx context.Context, in *StatsRequest, opts ...grpc.CallOption) (*StatsResponse, error) { + out := new(StatsResponse) + err := grpc.Invoke(ctx, "/types.API/Stats", in, out, c.cc, opts...) 
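+	// The Events stream above is consumed with the usual gRPC receive
+	// loop; a minimal sketch, assuming client is an APIClient and the io
+	// import is in scope:
+	//
+	//	stream, err := client.Events(ctx, &EventsRequest{Timestamp: 0})
+	//	if err != nil {
+	//		// handle call error
+	//	}
+	//	for {
+	//		ev, err := stream.Recv()
+	//		if err == io.EOF {
+	//			break // server closed the stream
+	//		}
+	//		if err != nil {
+	//			break // transport or server error
+	//		}
+	//		_ = ev // handle the event
+	//	}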
+ if err != nil { + return nil, err + } + return out, nil +} + +// Server API for API service + +type APIServer interface { + GetServerVersion(context.Context, *GetServerVersionRequest) (*GetServerVersionResponse, error) + CreateContainer(context.Context, *CreateContainerRequest) (*CreateContainerResponse, error) + UpdateContainer(context.Context, *UpdateContainerRequest) (*UpdateContainerResponse, error) + Signal(context.Context, *SignalRequest) (*SignalResponse, error) + UpdateProcess(context.Context, *UpdateProcessRequest) (*UpdateProcessResponse, error) + AddProcess(context.Context, *AddProcessRequest) (*AddProcessResponse, error) + CreateCheckpoint(context.Context, *CreateCheckpointRequest) (*CreateCheckpointResponse, error) + DeleteCheckpoint(context.Context, *DeleteCheckpointRequest) (*DeleteCheckpointResponse, error) + ListCheckpoint(context.Context, *ListCheckpointRequest) (*ListCheckpointResponse, error) + State(context.Context, *StateRequest) (*StateResponse, error) + Events(*EventsRequest, API_EventsServer) error + Stats(context.Context, *StatsRequest) (*StatsResponse, error) +} + +func RegisterAPIServer(s *grpc.Server, srv APIServer) { + s.RegisterService(&_API_serviceDesc, srv) +} + +func _API_GetServerVersion_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetServerVersionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(APIServer).GetServerVersion(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/types.API/GetServerVersion", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(APIServer).GetServerVersion(ctx, req.(*GetServerVersionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _API_CreateContainer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateContainerRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(APIServer).CreateContainer(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/types.API/CreateContainer", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(APIServer).CreateContainer(ctx, req.(*CreateContainerRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _API_UpdateContainer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateContainerRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(APIServer).UpdateContainer(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/types.API/UpdateContainer", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(APIServer).UpdateContainer(ctx, req.(*UpdateContainerRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _API_Signal_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SignalRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(APIServer).Signal(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/types.API/Signal", + } + 
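+	// Each unary handler has this shape: decode the request, then either
+	// call the service directly or let a server-side interceptor wrap the
+	// call. A sketch of an interceptor that would observe every request,
+	// installed via grpc.UnaryInterceptor when constructing *grpc.Server
+	// (log import elided):
+	//
+	//	func logging(ctx context.Context, req interface{},
+	//		info *grpc.UnaryServerInfo, h grpc.UnaryHandler) (interface{}, error) {
+	//		log.Println("rpc:", info.FullMethod)
+	//		return h(ctx, req)
+	//	}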
handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(APIServer).Signal(ctx, req.(*SignalRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _API_UpdateProcess_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateProcessRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(APIServer).UpdateProcess(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/types.API/UpdateProcess", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(APIServer).UpdateProcess(ctx, req.(*UpdateProcessRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _API_AddProcess_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AddProcessRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(APIServer).AddProcess(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/types.API/AddProcess", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(APIServer).AddProcess(ctx, req.(*AddProcessRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _API_CreateCheckpoint_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateCheckpointRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(APIServer).CreateCheckpoint(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/types.API/CreateCheckpoint", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(APIServer).CreateCheckpoint(ctx, req.(*CreateCheckpointRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _API_DeleteCheckpoint_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteCheckpointRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(APIServer).DeleteCheckpoint(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/types.API/DeleteCheckpoint", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(APIServer).DeleteCheckpoint(ctx, req.(*DeleteCheckpointRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _API_ListCheckpoint_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListCheckpointRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(APIServer).ListCheckpoint(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/types.API/ListCheckpoint", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(APIServer).ListCheckpoint(ctx, req.(*ListCheckpointRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _API_State_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := 
new(StateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(APIServer).State(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/types.API/State", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(APIServer).State(ctx, req.(*StateRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _API_Events_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(EventsRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(APIServer).Events(m, &aPIEventsServer{stream}) +} + +type API_EventsServer interface { + Send(*Event) error + grpc.ServerStream +} + +type aPIEventsServer struct { + grpc.ServerStream +} + +func (x *aPIEventsServer) Send(m *Event) error { + return x.ServerStream.SendMsg(m) +} + +func _API_Stats_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StatsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(APIServer).Stats(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/types.API/Stats", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(APIServer).Stats(ctx, req.(*StatsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _API_serviceDesc = grpc.ServiceDesc{ + ServiceName: "types.API", + HandlerType: (*APIServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GetServerVersion", + Handler: _API_GetServerVersion_Handler, + }, + { + MethodName: "CreateContainer", + Handler: _API_CreateContainer_Handler, + }, + { + MethodName: "UpdateContainer", + Handler: _API_UpdateContainer_Handler, + }, + { + MethodName: "Signal", + Handler: _API_Signal_Handler, + }, + { + MethodName: "UpdateProcess", + Handler: _API_UpdateProcess_Handler, + }, + { + MethodName: "AddProcess", + Handler: _API_AddProcess_Handler, + }, + { + MethodName: "CreateCheckpoint", + Handler: _API_CreateCheckpoint_Handler, + }, + { + MethodName: "DeleteCheckpoint", + Handler: _API_DeleteCheckpoint_Handler, + }, + { + MethodName: "ListCheckpoint", + Handler: _API_ListCheckpoint_Handler, + }, + { + MethodName: "State", + Handler: _API_State_Handler, + }, + { + MethodName: "Stats", + Handler: _API_Stats_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "Events", + Handler: _API_Events_Handler, + ServerStreams: true, + }, + }, +} + +var fileDescriptor0 = []byte{ + // 1918 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xd4, 0x58, 0x5b, 0x8f, 0x1b, 0x49, + 0x15, 0xce, 0xd8, 0x1e, 0xcf, 0xf8, 0xf8, 0x32, 0x99, 0xce, 0x5c, 0x1c, 0x2f, 0x9b, 0x0d, 0xcd, + 0xc2, 0x46, 0xb0, 0x1a, 0x2d, 0x5e, 0x2e, 0x4b, 0x90, 0x10, 0x61, 0xb2, 0xda, 0x05, 0x25, 0xc1, + 0x3b, 0x93, 0x2c, 0xe2, 0xc9, 0xea, 0xe9, 0xae, 0xd8, 0xc5, 0xb4, 0xbb, 0x9b, 0xaa, 0xea, 0xb9, + 0xfc, 0x08, 0x7e, 0x09, 0x12, 0xe2, 0x89, 0x1f, 0xc0, 0xff, 0xe0, 0x8d, 0x27, 0x24, 0xfe, 0x03, + 0xa7, 0x4e, 0x55, 0xb5, 0xbb, 0xdb, 0xf6, 0x04, 0x09, 0xf1, 0xb0, 0x2f, 0x96, 0xab, 0xea, 0x5c, + 0xbf, 0x73, 0xa9, 0x53, 0x0d, 0x9d, 0x20, 0xe3, 0x27, 0x99, 0x48, 0x55, 0xea, 0x6d, 0xab, 0xdb, + 0x8c, 0x49, 0xff, 0x21, 0x1c, 0x7f, 0xc1, 0xd4, 0x39, 0x13, 0x57, 0x4c, 0x7c, 0xcd, 0x84, 0xe4, + 0x69, 0x72, 0xc6, 0xfe, 0x98, 0x33, 0xa9, 0xfc, 0xdf, 0xc3, 0x70, 0xf5, 0x48, 0x66, 0x69, 0x22, + 0x99, 0xd7, 0x87, 
0xed, 0x45, 0xf0, 0x87, 0x54, 0x0c, 0xb7, 0x1e, 0x6f, 0x3d, 0xe9, 0xd3, 0x92, + 0x27, 0xb8, 0x6c, 0xb8, 0x65, 0x16, 0xa8, 0x70, 0x3e, 0x6c, 0xd2, 0xf2, 0x3e, 0xec, 0x0a, 0x76, + 0xc5, 0xb5, 0x80, 0x61, 0x0b, 0x77, 0x3a, 0xfe, 0x05, 0x1c, 0xbc, 0xc9, 0xa2, 0x40, 0xb1, 0x89, + 0x48, 0x43, 0x26, 0xa5, 0x55, 0xe9, 0x01, 0x34, 0x78, 0x44, 0x32, 0x3b, 0x5e, 0x17, 0x9a, 0x19, + 0x2e, 0x1a, 0xb4, 0xc0, 0x93, 0x30, 0x4e, 0x25, 0x3b, 0x57, 0x11, 0x4f, 0x48, 0xec, 0xae, 0xd6, + 0x72, 0xcd, 0x23, 0x35, 0x27, 0x99, 0x7d, 0x6f, 0x00, 0xed, 0x39, 0xe3, 0xb3, 0xb9, 0x1a, 0x6e, + 0xeb, 0xb5, 0x7f, 0x0c, 0x87, 0x35, 0x1d, 0xc6, 0x76, 0xff, 0x1f, 0x5b, 0x70, 0x74, 0x2a, 0x18, + 0x9e, 0x9c, 0xa6, 0x89, 0x0a, 0x78, 0xc2, 0xc4, 0x3a, 0xfd, 0xb8, 0xb8, 0xc8, 0x93, 0x28, 0x66, + 0x93, 0x00, 0x75, 0x2c, 0xcd, 0x98, 0xb3, 0xf0, 0x32, 0x4b, 0x79, 0xa2, 0xc8, 0x8c, 0x8e, 0x36, + 0x43, 0x92, 0x55, 0xe4, 0x9a, 0x36, 0x03, 0x97, 0x69, 0x6e, 0xcc, 0x70, 0x6b, 0x26, 0xc4, 0xb0, + 0xed, 0xd6, 0x71, 0x70, 0xc1, 0x62, 0x39, 0xdc, 0x79, 0xdc, 0xc4, 0xf5, 0x03, 0xe8, 0x26, 0xe9, + 0x84, 0x5f, 0xa5, 0xea, 0x2c, 0x4d, 0xd5, 0x70, 0x97, 0x5c, 0xdb, 0x83, 0x1d, 0x91, 0x27, 0x8a, + 0x2f, 0xd8, 0xb0, 0x43, 0x5c, 0x48, 0x65, 0x37, 0x9e, 0x89, 0x99, 0x1c, 0x02, 0xb1, 0x1e, 0x42, + 0x7f, 0x69, 0xcd, 0x73, 0x2e, 0x86, 0x5d, 0x02, 0xf7, 0x17, 0x70, 0xbc, 0xe2, 0x9e, 0x0d, 0xdb, + 0x77, 0xa0, 0x13, 0xba, 0x4d, 0x72, 0xb3, 0x3b, 0xbe, 0x7f, 0x42, 0x89, 0x70, 0x52, 0x10, 0xfb, + 0x9f, 0x41, 0xff, 0x9c, 0xcf, 0x92, 0x20, 0x7e, 0x67, 0x54, 0xb4, 0x6f, 0x44, 0x69, 0x02, 0xed, + 0xdf, 0x87, 0x81, 0xe3, 0xb4, 0x58, 0xff, 0xa5, 0x01, 0xfb, 0xcf, 0xa2, 0xe8, 0x8e, 0x30, 0x63, + 0x72, 0x28, 0x26, 0x30, 0x7b, 0x50, 0x4a, 0x83, 0x9c, 0x7f, 0x08, 0xad, 0x5c, 0xa2, 0x7d, 0x4d, + 0xb2, 0xaf, 0x6b, 0xed, 0x7b, 0x83, 0x5b, 0x5e, 0x0f, 0x5a, 0x81, 0xf6, 0xbf, 0x45, 0xfe, 0xa3, + 0x2d, 0x2c, 0xb9, 0x42, 0x9c, 0xed, 0x22, 0xbc, 0x8e, 0x2c, 0xc8, 0xd6, 0xca, 0x9d, 0x6a, 0x80, + 0x76, 0x6b, 0x01, 0xea, 0xd4, 0x02, 0x04, 0xb4, 0x3e, 0x80, 0x5e, 0x18, 0x64, 0xc1, 0x05, 0x8f, + 0xb9, 0xe2, 0x4c, 0x22, 0xa8, 0x5a, 0xfc, 0x31, 0xec, 0x05, 0x59, 0x16, 0x88, 0x45, 0x2a, 0xd0, + 0x99, 0xb7, 0x3c, 0x66, 0xc3, 0x9e, 0x23, 0x97, 0x2c, 0xe6, 0x49, 0x7e, 0xf3, 0x42, 0x87, 0x75, + 0xd8, 0xa7, 0x5d, 0x24, 0x4f, 0xd2, 0x57, 0xec, 0x7a, 0x22, 0xf8, 0x15, 0xd2, 0xce, 0x50, 0xce, + 0x80, 0x9c, 0x7b, 0x84, 0x91, 0x8d, 0xf9, 0x82, 0x2b, 0x39, 0xdc, 0x43, 0xc1, 0xdd, 0x71, 0xdf, + 0xfa, 0x77, 0x46, 0xbb, 0xfe, 0x18, 0xda, 0xe6, 0x9f, 0xf6, 0x55, 0x9f, 0x58, 0x98, 0x70, 0x25, + 0xd3, 0xb7, 0x8a, 0x20, 0x6a, 0xe9, 0xd5, 0x3c, 0x10, 0x11, 0x41, 0xd4, 0xc2, 0x80, 0xb5, 0x08, + 0x1d, 0xf4, 0x3a, 0xb7, 0xb8, 0xf6, 0xf5, 0x62, 0x66, 0x03, 0xd5, 0xf7, 0x8e, 0x60, 0x10, 0x44, + 0x11, 0xfa, 0x93, 0x22, 0xcc, 0x5f, 0xf0, 0x48, 0x22, 0x67, 0x13, 0x03, 0x76, 0x00, 0x5e, 0x39, + 0x3a, 0x36, 0x68, 0x61, 0x91, 0x40, 0x45, 0x76, 0xad, 0x8b, 0xdc, 0x77, 0x2b, 0xc5, 0xd0, 0xa0, + 0x68, 0xed, 0xbb, 0x6c, 0x2a, 0x0e, 0x56, 0xb3, 0x94, 0xca, 0xc6, 0x1f, 0xc1, 0x70, 0x55, 0x89, + 0x35, 0xe0, 0x37, 0x70, 0xfc, 0x9c, 0xc5, 0xec, 0x5d, 0x06, 0x20, 0x0a, 0x49, 0x80, 0x25, 0x62, + 0x92, 0x71, 0xb3, 0x9e, 0x55, 0x59, 0x56, 0xcf, 0x53, 0x38, 0x7c, 0xc1, 0xa5, 0xba, 0x5b, 0xcb, + 0x8a, 0x5c, 0x52, 0x87, 0xdd, 0x11, 0x4a, 0x4e, 0x3a, 0x53, 0x0a, 0xc3, 0xd8, 0x0d, 0x57, 0x36, + 0x9f, 0x31, 0x12, 0x2a, 0xcc, 0x6c, 0xd3, 0xc2, 0x42, 0xce, 0x13, 0x7e, 0x73, 0x9e, 0x86, 0x97, + 0x4c, 0x49, 0xea, 0x19, 0xd4, 0xc9, 0xe4, 0x9c, 0xc5, 0x31, 0xb5, 0x8c, 0x5d, 0xff, 0x97, 0x70, + 0x54, 0x37, 0xcb, 0xd6, 0xef, 0xf7, 0xa0, 
0xbb, 0xb4, 0x45, 0xa2, 0xb6, 0xe6, 0x5a, 0xcc, 0xd1, + 0xe9, 0xde, 0xb9, 0x42, 0x6c, 0xd7, 0xf8, 0xe3, 0x3f, 0x86, 0x41, 0x51, 0xeb, 0x44, 0x64, 0x2a, + 0x20, 0x50, 0xb9, 0xb4, 0x14, 0x7f, 0x6e, 0xc0, 0x8e, 0xcd, 0x09, 0x57, 0x49, 0xff, 0xc7, 0x5a, + 0xdd, 0x87, 0x8e, 0xbc, 0x95, 0x8a, 0x2d, 0x26, 0xb6, 0x62, 0xfb, 0xdf, 0xac, 0x8a, 0xfd, 0xd3, + 0x16, 0x74, 0x0a, 0x40, 0xdf, 0x79, 0x83, 0x7c, 0x1b, 0x3a, 0x99, 0x81, 0x96, 0x99, 0x22, 0xec, + 0x8e, 0x07, 0x56, 0x9e, 0x83, 0x7c, 0x19, 0x8e, 0x56, 0xed, 0xc6, 0x30, 0xe8, 0x21, 0xb0, 0x99, + 0x2e, 0xe1, 0xb6, 0x2e, 0xe1, 0xf2, 0x55, 0x41, 0xed, 0xce, 0xff, 0x08, 0x76, 0x5e, 0x06, 0xe1, + 0x1c, 0xad, 0xd1, 0x94, 0x61, 0x66, 0xc3, 0x4a, 0x17, 0xe4, 0x82, 0x21, 0x1a, 0xb7, 0xa6, 0x89, + 0xf8, 0x5f, 0x63, 0x9f, 0x37, 0x49, 0x62, 0xb3, 0xeb, 0x43, 0x2c, 0x68, 0xe7, 0x88, 0x4b, 0xae, + 0x95, 0xeb, 0xc1, 0xfb, 0x00, 0x76, 0x16, 0x46, 0xbe, 0xad, 0x79, 0x67, 0xbf, 0xd5, 0xea, 0x5f, + 0xc2, 0x91, 0xb9, 0x78, 0xef, 0xbc, 0x5e, 0x57, 0x2e, 0x12, 0xe3, 0xb2, 0xb9, 0x53, 0x9f, 0x40, + 0x47, 0x30, 0x99, 0xe6, 0x02, 0x01, 0x21, 0x14, 0xba, 0xe3, 0x43, 0x97, 0x5b, 0x24, 0xfa, 0xcc, + 0x9e, 0xfa, 0xff, 0xdc, 0x82, 0x41, 0x75, 0x4b, 0x97, 0xd8, 0x45, 0x7c, 0xc9, 0xd3, 0xdf, 0x99, + 0x69, 0xc0, 0x38, 0x8f, 0x59, 0x86, 0x50, 0x9c, 0x63, 0xd7, 0x44, 0x89, 0x8d, 0xd2, 0xd6, 0x84, + 0x09, 0x9e, 0x46, 0xcb, 0x49, 0x05, 0xb7, 0xbe, 0xca, 0x53, 0x15, 0xd8, 0xa9, 0x42, 0xdf, 0xf8, + 0x08, 0x21, 0x53, 0xa7, 0x1a, 0xc8, 0xed, 0x62, 0x0a, 0xa0, 0xbd, 0x97, 0x6c, 0x21, 0x6d, 0x16, + 0xa3, 0x52, 0x03, 0xee, 0x0b, 0x9d, 0x14, 0x36, 0x8f, 0x91, 0xd0, 0x6c, 0x9e, 0x5f, 0x07, 0x19, + 0x25, 0x73, 0x1f, 0x2b, 0x66, 0xdf, 0xec, 0xa1, 0xbd, 0x38, 0x5a, 0x05, 0xba, 0x27, 0x53, 0x5e, + 0xd3, 0xd1, 0x25, 0x13, 0x09, 0x8b, 0x5f, 0x96, 0x24, 0x01, 0xdd, 0xac, 0x38, 0xa6, 0xad, 0x60, + 0x6a, 0x9b, 0x98, 0x0f, 0xfd, 0xcf, 0xaf, 0x18, 0xb6, 0x03, 0x87, 0x32, 0xfa, 0xa5, 0xd3, 0x01, + 0x01, 0x5d, 0x64, 0xe4, 0x7d, 0xcb, 0xff, 0x0a, 0xb6, 0x89, 0xa6, 0x76, 0xa9, 0x98, 0x78, 0xac, + 0x0b, 0x41, 0xdf, 0xc5, 0xa7, 0xe5, 0x6a, 0x74, 0x29, 0x72, 0x9b, 0x44, 0xfe, 0x6d, 0x0b, 0x7a, + 0xaf, 0x98, 0xba, 0x4e, 0xc5, 0xa5, 0xce, 0x22, 0x59, 0x6b, 0x81, 0x7a, 0xe6, 0xbb, 0x99, 0x5e, + 0xdc, 0x2a, 0x0b, 0x77, 0x4b, 0x83, 0x81, 0x3b, 0x93, 0xc0, 0x34, 0x3e, 0xba, 0xb9, 0xb4, 0xdc, + 0xb3, 0x9b, 0x29, 0x56, 0x72, 0x2a, 0x4c, 0x9c, 0x89, 0x0c, 0xb7, 0x22, 0x91, 0x66, 0x19, 0x8b, + 0x8c, 0x2e, 0x2d, 0xec, 0xb5, 0x13, 0xd6, 0x76, 0x54, 0xb8, 0x93, 0x59, 0x61, 0x3b, 0x4e, 0xd8, + 0xeb, 0x42, 0xd8, 0x6e, 0x89, 0xcc, 0x09, 0xeb, 0x90, 0xe1, 0x0b, 0xd8, 0xc5, 0x58, 0xbe, 0x91, + 0xc1, 0x8c, 0x52, 0x45, 0x61, 0xac, 0xe3, 0x69, 0xae, 0x97, 0x06, 0x2c, 0xdd, 0x1f, 0x32, 0x26, + 0x30, 0xc2, 0x76, 0xb7, 0x81, 0x85, 0xd0, 0xf2, 0xde, 0x83, 0x07, 0xb4, 0x9c, 0xf2, 0x64, 0x6a, + 0xa2, 0xb4, 0x48, 0x23, 0x66, 0xfd, 0xc0, 0xc8, 0x15, 0x87, 0xba, 0x1f, 0xd2, 0x11, 0xf9, 0xe3, + 0xbf, 0x86, 0xc1, 0xeb, 0x39, 0x4e, 0xdc, 0x0a, 0x3b, 0xce, 0xec, 0x79, 0xa0, 0x02, 0x5d, 0xb1, + 0x19, 0x25, 0x9d, 0xb4, 0x0a, 0x91, 0x5b, 0x19, 0x12, 0x16, 0x4d, 0xdd, 0x91, 0x01, 0x0d, 0x2f, + 0xee, 0xe5, 0x11, 0x15, 0xb9, 0xb9, 0xf2, 0x15, 0x39, 0x61, 0x80, 0xf7, 0x29, 0x8f, 0x4b, 0x2e, + 0x74, 0xc7, 0x7b, 0xae, 0x6a, 0x9d, 0xa3, 0x27, 0xb0, 0xa7, 0x0a, 0x2b, 0xa6, 0x98, 0x48, 0x81, + 0x2d, 0x5e, 0x57, 0x56, 0x35, 0x1b, 0x75, 0x8f, 0xa4, 0xa6, 0x6c, 0xc5, 0x1a, 0xad, 0x3f, 0x80, + 0x0e, 0x36, 0x69, 0x69, 0xd4, 0xa2, 0x1b, 0x61, 0x2e, 0x04, 0x66, 0x95, 0x75, 0x03, 0xbb, 0x36, + 0x75, 0x44, 0xdb, 0x5e, 0x5e, 0x01, 0x98, 0x3c, 0x26, 0x81, 0x78, 
0x58, 0xc6, 0x18, 0x63, 0xb5, + 0x08, 0x6e, 0x0a, 0x80, 0xf5, 0x16, 0xca, 0x7b, 0x1b, 0xf0, 0x38, 0xb4, 0x83, 0x75, 0x49, 0x9e, + 0x01, 0xf2, 0x5f, 0x5b, 0xd0, 0x35, 0x02, 0x8d, 0x7e, 0x3c, 0x0e, 0xb1, 0xe3, 0x38, 0x89, 0x8f, + 0x9d, 0x82, 0xea, 0x20, 0x52, 0x32, 0x01, 0xe7, 0x15, 0x89, 0x75, 0x58, 0xf2, 0x68, 0x2d, 0xd9, + 0x47, 0xd0, 0x33, 0xf1, 0xb5, 0x84, 0xad, 0x4d, 0x84, 0x1f, 0xeb, 0x5b, 0x0a, 0x2d, 0xa1, 0xb6, + 0xdc, 0x1d, 0xbf, 0x5f, 0xa1, 0x20, 0x1b, 0x4f, 0xe8, 0xf7, 0xf3, 0x44, 0x89, 0xdb, 0xd1, 0xc7, + 0x00, 0xcb, 0x95, 0xae, 0xae, 0x4b, 0x76, 0x6b, 0x6b, 0x05, 0x3d, 0xb9, 0x0a, 0xe2, 0xdc, 0x02, + 0xf1, 0xb4, 0xf1, 0xd9, 0x16, 0x4e, 0x40, 0x7b, 0xbf, 0xd2, 0x3d, 0xac, 0xc4, 0x52, 0x79, 0x72, + 0xb5, 0xaa, 0x4f, 0xae, 0x96, 0x2e, 0xe5, 0x34, 0x5b, 0xbe, 0x48, 0x8c, 0x3c, 0x03, 0xdc, 0xdf, + 0x9b, 0x00, 0x4b, 0x61, 0xde, 0x53, 0x18, 0xf1, 0x74, 0xaa, 0x7b, 0x0f, 0x0f, 0x99, 0x29, 0xaa, + 0xa9, 0x60, 0x18, 0x4a, 0xc9, 0xaf, 0x98, 0xed, 0xfa, 0x47, 0xd6, 0x97, 0xba, 0x0d, 0x3f, 0x86, + 0xc3, 0x25, 0x6f, 0x54, 0x62, 0x6b, 0xdc, 0xc9, 0xf6, 0x29, 0x3c, 0x40, 0x36, 0xec, 0x4e, 0x79, + 0x85, 0xa9, 0x79, 0x27, 0xd3, 0xcf, 0xe0, 0x61, 0xc9, 0x4e, 0x9d, 0xfb, 0x25, 0xd6, 0xd6, 0x9d, + 0xac, 0x3f, 0x81, 0x23, 0x64, 0xbd, 0x0e, 0xb8, 0xaa, 0xf3, 0x6d, 0xff, 0x17, 0x76, 0x2e, 0x98, + 0x98, 0x55, 0xec, 0x6c, 0xdf, 0xc9, 0xf4, 0x43, 0xd8, 0x47, 0xa6, 0x9a, 0x9e, 0x9d, 0x77, 0xb1, + 0x48, 0x16, 0x2a, 0xec, 0x53, 0x25, 0x96, 0xdd, 0xbb, 0x58, 0xfc, 0x09, 0xf4, 0xbe, 0xcc, 0x67, + 0x4c, 0xc5, 0x17, 0x45, 0xf6, 0xff, 0x8f, 0xf5, 0xf4, 0xd7, 0x06, 0x74, 0x4f, 0x67, 0x22, 0xcd, + 0xb3, 0x4a, 0x1b, 0x31, 0x29, 0xbd, 0xd2, 0x46, 0x0c, 0xcd, 0x13, 0xe8, 0x99, 0xcb, 0xcb, 0x92, + 0x99, 0x5a, 0xf3, 0x56, 0x33, 0x5f, 0x4f, 0xaa, 0x74, 0x09, 0x5b, 0xc2, 0x6a, 0xb5, 0x95, 0xb2, + 0xf1, 0xe7, 0xd0, 0x9f, 0x1b, 0xbf, 0x2c, 0xa5, 0x89, 0xec, 0x87, 0x4e, 0xf3, 0xd2, 0xc0, 0x93, + 0xb2, 0xff, 0x06, 0x47, 0x1c, 0x58, 0xf4, 0x24, 0x34, 0x75, 0x65, 0x58, 0x7e, 0xcf, 0x16, 0x8d, + 0x6a, 0xf4, 0x25, 0xec, 0xaf, 0xb2, 0x56, 0x0a, 0xd0, 0x2f, 0x17, 0x60, 0x77, 0xfc, 0xc0, 0x8a, + 0x28, 0x73, 0x51, 0x55, 0xde, 0x98, 0x89, 0xa9, 0x78, 0x29, 0x79, 0xdf, 0x87, 0x7e, 0x62, 0xee, + 0xc0, 0x02, 0xb7, 0x66, 0x49, 0x40, 0xe5, 0x7e, 0x44, 0xec, 0x42, 0xf2, 0x66, 0x2d, 0x76, 0xe5, + 0x48, 0x54, 0x6e, 0x5b, 0xd3, 0x79, 0xed, 0x40, 0xbf, 0xee, 0x05, 0x3d, 0xfe, 0x77, 0x1b, 0x9a, + 0xcf, 0x26, 0xbf, 0xf6, 0xde, 0xc0, 0xfd, 0xfa, 0xf7, 0x1a, 0xef, 0x91, 0x15, 0xbf, 0xe1, 0x1b, + 0xcf, 0xe8, 0x83, 0x8d, 0xe7, 0x76, 0xba, 0xb8, 0xe7, 0x9d, 0xc1, 0x5e, 0xed, 0x73, 0x82, 0xe7, + 0x5a, 0xdd, 0xfa, 0xaf, 0x28, 0xa3, 0x47, 0x9b, 0x8e, 0xcb, 0x32, 0x6b, 0xe3, 0x4c, 0x21, 0x73, + 0xfd, 0xe8, 0x58, 0xc8, 0xdc, 0x34, 0x05, 0xdd, 0xf3, 0x7e, 0x0a, 0x6d, 0xf3, 0xf1, 0xc1, 0x3b, + 0xb0, 0xb4, 0x95, 0xaf, 0x18, 0xa3, 0xc3, 0xda, 0x6e, 0xc1, 0xf8, 0x02, 0xfa, 0x95, 0x0f, 0x45, + 0xde, 0x7b, 0x15, 0x5d, 0xd5, 0x6f, 0x17, 0xa3, 0x6f, 0xad, 0x3f, 0x2c, 0xa4, 0x9d, 0x02, 0x2c, + 0x9f, 0xd4, 0xde, 0xd0, 0x52, 0xaf, 0x7c, 0x03, 0x19, 0x3d, 0x5c, 0x73, 0x52, 0x08, 0xc1, 0x50, + 0xd6, 0x1f, 0xc7, 0x5e, 0x0d, 0xd5, 0xfa, 0x9b, 0xb5, 0x08, 0xe5, 0xc6, 0x57, 0x35, 0x89, 0xad, + 0xbf, 0x85, 0x0b, 0xb1, 0x1b, 0x1e, 0xdc, 0x85, 0xd8, 0x8d, 0x8f, 0xe8, 0x7b, 0xde, 0x6f, 0x61, + 0x50, 0x7d, 0xaf, 0x7a, 0x0e, 0xa4, 0xb5, 0xaf, 0xeb, 0xd1, 0xfb, 0x1b, 0x4e, 0x0b, 0x81, 0x3f, + 0x82, 0x6d, 0xf3, 0x32, 0x75, 0x85, 0x54, 0x7e, 0xcc, 0x8e, 0x0e, 0xaa, 0x9b, 0x05, 0xd7, 0x27, + 0xd0, 0x36, 0x83, 0x70, 0x91, 0x00, 0x95, 0xb9, 0x78, 0xd4, 0x2b, 0xef, 0xfa, 0xf7, 0x3e, 
0xd9, + 0x72, 0x7a, 0x64, 0x45, 0x8f, 0x5c, 0xa7, 0xa7, 0x14, 0x9c, 0x8b, 0x36, 0x7d, 0x40, 0xfd, 0xf4, + 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xf9, 0xfa, 0x57, 0x5a, 0x4d, 0x15, 0x00, 0x00, +} diff --git a/vendor/github.com/docker/containerd/api/grpc/types/api.proto b/vendor/github.com/docker/containerd/api/grpc/types/api.proto new file mode 100644 index 00000000..63a07bda --- /dev/null +++ b/vendor/github.com/docker/containerd/api/grpc/types/api.proto @@ -0,0 +1,311 @@ +syntax = "proto3"; + +package types; + +service API { + rpc GetServerVersion(GetServerVersionRequest) returns (GetServerVersionResponse) {} + rpc CreateContainer(CreateContainerRequest) returns (CreateContainerResponse) {} + rpc UpdateContainer(UpdateContainerRequest) returns (UpdateContainerResponse) {} + rpc Signal(SignalRequest) returns (SignalResponse) {} + rpc UpdateProcess(UpdateProcessRequest) returns (UpdateProcessResponse) {} + rpc AddProcess(AddProcessRequest) returns (AddProcessResponse) {} + rpc CreateCheckpoint(CreateCheckpointRequest) returns (CreateCheckpointResponse) {} + rpc DeleteCheckpoint(DeleteCheckpointRequest) returns (DeleteCheckpointResponse) {} + rpc ListCheckpoint(ListCheckpointRequest) returns (ListCheckpointResponse) {} + rpc State(StateRequest) returns (StateResponse) {} + rpc Events(EventsRequest) returns (stream Event) {} + rpc Stats(StatsRequest) returns (StatsResponse) {} +} + +message GetServerVersionRequest { +} + +message GetServerVersionResponse { + uint32 major = 1; + uint32 minor = 2; + uint32 patch = 3; + string revision = 4; +} + +message UpdateProcessRequest { + string id = 1; + string pid = 2; + bool closeStdin = 3; // Close stdin of the container + uint32 width = 4; + uint32 height = 5; +} + +message UpdateProcessResponse { +} + +message CreateContainerRequest { + string id = 1; // ID of container + string bundlePath = 2; // path to OCI bundle + string checkpoint = 3; // checkpoint name if you want to create an immediate checkpoint (optional) + string stdin = 4; // path to the file where stdin will be read (optional) + string stdout = 5; // path to file where stdout will be written (optional) + string stderr = 6; // path to file where stderr will be written (optional) + repeated string labels = 7; + bool noPivotRoot = 8; + string runtime = 9; + repeated string runtimeArgs = 10; + string checkpointDir = 11; // Directory where checkpoints are stored +} + +message CreateContainerResponse { + Container container = 1; +} + +message SignalRequest { + string id = 1; // ID of container + string pid = 2; // PID of process inside container + uint32 signal = 3; // Signal which will be sent; you can find values in "man 7 signal" +} + +message SignalResponse { +} + +message AddProcessRequest { + string id = 1; // ID of container + bool terminal = 2; // Use tty for container stdio + User user = 3; // User under which process will be run + repeated string args = 4; // Arguments for process, first is binary path itself + repeated string env = 5; // List of environment variables for process + string cwd = 6; // Working directory of process + string pid = 7; // Process ID + string stdin = 8; // path to the file where stdin will be read (optional) + string stdout = 9; // path to file where stdout will be written (optional) + string stderr = 10; // path to file where stderr will be written (optional) + repeated string capabilities = 11; + string apparmorProfile = 12; + string selinuxLabel = 13; + bool noNewPrivileges = 14; + repeated Rlimit rlimits = 15; +} + +message Rlimit { + string type = 1; + 
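// soft and hard carry the limits as understood by setrlimit(2). +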
uint64 soft = 2; + uint64 hard = 3; +} + +message User { + uint32 uid = 1; // UID of user + uint32 gid = 2; // GID of user + repeated uint32 additionalGids = 3; // Additional groups to which user will be added +} + +message AddProcessResponse { +} + +message CreateCheckpointRequest { + string id = 1; // ID of container + Checkpoint checkpoint = 2; // Checkpoint configuration + string checkpointDir = 3; // Directory where checkpoints are stored +} + +message CreateCheckpointResponse { +} + +message DeleteCheckpointRequest { + string id = 1; // ID of container + string name = 2; // Name of checkpoint + string checkpointDir = 3; // Directory where checkpoints are stored +} + +message DeleteCheckpointResponse { +} + +message ListCheckpointRequest { + string id = 1; // ID of container + string checkpointDir = 2; // Directory where checkpoints are stored +} + +message Checkpoint { + string name = 1; // Name of checkpoint + bool exit = 2; // checkpoint configuration: should container exit on checkpoint or not + bool tcp = 3; // allow open tcp connections + bool unixSockets = 4; // allow external unix sockets + bool shell = 5; // allow shell-jobs +} + +message ListCheckpointResponse { + repeated Checkpoint checkpoints = 1; // List of checkpoints +} + +message StateRequest { + string id = 1; // container id for a single container +} + +message ContainerState { + string status = 1; +} + +message Process { + string pid = 1; + bool terminal = 2; // Use tty for container stdio + User user = 3; // User under which process will be run + repeated string args = 4; // Arguments for process, first is binary path itself + repeated string env = 5; // List of environment variables for process + string cwd = 6; // Working directory of process + uint32 systemPid = 7; + string stdin = 8; // path to the file where stdin will be read (optional) + string stdout = 9; // path to file where stdout will be written (optional) + string stderr = 10; // path to file where stderr will be written (optional) + repeated string capabilities = 11; + string apparmorProfile = 12; + string selinuxLabel = 13; + bool noNewPrivileges = 14; + repeated Rlimit rlimits = 15; +} + +message Container { + string id = 1; // ID of container + string bundlePath = 2; // Path to OCI bundle + repeated Process processes = 3; // List of processes which run in container + string status = 4; // Container status ("running", "paused", etc.) 
+ repeated string labels = 5; + repeated uint32 pids = 6; + string runtime = 7; // runtime used to execute the container +} + +// Machine is information about the machine on which containerd is run +message Machine { + uint32 cpus = 1; // number of cpus + uint64 memory = 2; // amount of memory +} + +// StateResponse is information about the containerd daemon +message StateResponse { + repeated Container containers = 1; + Machine machine = 2; +} + +message UpdateContainerRequest { + string id = 1; // ID of container + string pid = 2; + string status = 3; // Status to which containerd will try to change + UpdateResource resources = 4; +} + +message UpdateResource { + uint32 blkioWeight = 1; + uint32 cpuShares = 2; + uint32 cpuPeriod = 3; + uint32 cpuQuota = 4; + string cpusetCpus = 5; + string cpusetMems = 6; + uint32 memoryLimit = 7; + uint32 memorySwap = 8; + uint32 memoryReservation = 9; + uint32 kernelMemoryLimit = 10; +} + +message UpdateContainerResponse { +} + +message EventsRequest { + uint64 timestamp = 1; +} + +message Event { + string type = 1; + string id = 2; + uint32 status = 3; + string pid = 4; + uint64 timestamp = 5; +} + +message NetworkStats { + string name = 1; // name of network interface + uint64 rx_bytes = 2; + uint64 rx_Packets = 3; + uint64 Rx_errors = 4; + uint64 Rx_dropped = 5; + uint64 Tx_bytes = 6; + uint64 Tx_packets = 7; + uint64 Tx_errors = 8; + uint64 Tx_dropped = 9; +} + +message CpuUsage { + uint64 total_usage = 1; + repeated uint64 percpu_usage = 2; + uint64 usage_in_kernelmode = 3; + uint64 usage_in_usermode = 4; +} + +message ThrottlingData { + uint64 periods = 1; + uint64 throttled_periods = 2; + uint64 throttled_time = 3; +} + +message CpuStats { + CpuUsage cpu_usage = 1; + ThrottlingData throttling_data = 2; + uint64 system_usage = 3; +} + +message PidsStats { + uint64 current = 1; + uint64 limit = 2; +} + +message MemoryData { + uint64 usage = 1; + uint64 max_usage = 2; + uint64 failcnt = 3; + uint64 limit = 4; +} + +message MemoryStats { + uint64 cache = 1; + MemoryData usage = 2; + MemoryData swap_usage = 3; + MemoryData kernel_usage = 4; + map<string, uint64> stats = 5; +} + +message BlkioStatsEntry { + uint64 major = 1; + uint64 minor = 2; + string op = 3; + uint64 value = 4; +} + +message BlkioStats { + repeated BlkioStatsEntry io_service_bytes_recursive = 1; // number of bytes transferred to and from the block device + repeated BlkioStatsEntry io_serviced_recursive = 2; + repeated BlkioStatsEntry io_queued_recursive = 3; + repeated BlkioStatsEntry io_service_time_recursive = 4; + repeated BlkioStatsEntry io_wait_time_recursive = 5; + repeated BlkioStatsEntry io_merged_recursive = 6; + repeated BlkioStatsEntry io_time_recursive = 7; + repeated BlkioStatsEntry sectors_recursive = 8; +} + +message HugetlbStats { + uint64 usage = 1; + uint64 max_usage = 2; + uint64 failcnt = 3; + uint64 limit = 4; +} + +message CgroupStats { + CpuStats cpu_stats = 1; + MemoryStats memory_stats = 2; + BlkioStats blkio_stats = 3; + map<string, HugetlbStats> hugetlb_stats = 4; // the map is in the format "size of hugepage: stats of the hugepage" + PidsStats pids_stats = 5; +} + +message StatsResponse { + repeated NetworkStats network_stats = 1; + CgroupStats cgroup_stats = 2; + uint64 timestamp = 3; +}; + +message StatsRequest { + string id = 1; +} diff --git a/vendor/github.com/docker/containerd/archutils/epoll.go b/vendor/github.com/docker/containerd/archutils/epoll.go new file mode 100644 index 00000000..c8ade640 --- /dev/null +++ b/vendor/github.com/docker/containerd/archutils/epoll.go @@ -0,0 +1,19 @@ +// 
+build linux,!arm64 + +package archutils + +import ( + "syscall" +) + +func EpollCreate1(flag int) (int, error) { + return syscall.EpollCreate1(flag) +} + +func EpollCtl(epfd int, op int, fd int, event *syscall.EpollEvent) error { + return syscall.EpollCtl(epfd, op, fd, event) +} + +func EpollWait(epfd int, events []syscall.EpollEvent, msec int) (int, error) { + return syscall.EpollWait(epfd, events, msec) +} diff --git a/vendor/github.com/docker/containerd/archutils/epoll_arm64.go b/vendor/github.com/docker/containerd/archutils/epoll_arm64.go new file mode 100644 index 00000000..00abc683 --- /dev/null +++ b/vendor/github.com/docker/containerd/archutils/epoll_arm64.go @@ -0,0 +1,70 @@ +// +build linux,arm64 + +package archutils + +// #include <sys/epoll.h> +/* +int EpollCreate1(int flag) { + return epoll_create1(flag); +} + +int EpollCtl(int efd, int op,int sfd, int events, int fd) { + struct epoll_event event; + event.events = events; + event.data.fd = fd; + + return epoll_ctl(efd, op, sfd, &event); +} + +struct event_t { + uint32_t events; + int fd; +}; + +struct epoll_event events[128]; +int run_epoll_wait(int fd, struct event_t *event) { + int n, i; + n = epoll_wait(fd, events, 128, -1); + for (i = 0; i < n; i++) { + event[i].events = events[i].events; + event[i].fd = events[i].data.fd; + } + return n; +} +*/ +import "C" + +import ( + "fmt" + "syscall" + "unsafe" +) + +func EpollCreate1(flag int) (int, error) { + fd := int(C.EpollCreate1(C.int(flag))) + if fd < 0 { + return fd, fmt.Errorf("failed to create epoll, errno is %d", fd) + } + return fd, nil +} + +func EpollCtl(epfd int, op int, fd int, event *syscall.EpollEvent) error { + errno := C.EpollCtl(C.int(epfd), C.int(syscall.EPOLL_CTL_ADD), C.int(fd), C.int(event.Events), C.int(event.Fd)) + if errno < 0 { + return fmt.Errorf("Failed to ctl epoll") + } + return nil +} + +func EpollWait(epfd int, events []syscall.EpollEvent, msec int) (int, error) { + var c_events [128]C.struct_event_t + n := int(C.run_epoll_wait(C.int(epfd), (*C.struct_event_t)(unsafe.Pointer(&c_events)))) + if n < 0 { + return int(n), fmt.Errorf("Failed to wait epoll") + } + for i := 0; i < n; i++ { + events[i].Fd = int32(c_events[i].fd) + events[i].Events = uint32(c_events[i].events) + } + return int(n), nil +} diff --git a/vendor/github.com/docker/containerd/runtime/container.go b/vendor/github.com/docker/containerd/runtime/container.go new file mode 100644 index 00000000..54848b13 --- /dev/null +++ b/vendor/github.com/docker/containerd/runtime/container.go @@ -0,0 +1,593 @@ +package runtime + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/containerd/specs" + "github.com/docker/containerd/subreaper/exec" + ocs "github.com/opencontainers/runtime-spec/specs-go" +) + +type Container interface { + // ID returns the container ID + ID() string + // Path returns the path to the bundle + Path() string + // Start starts the init process of the container + Start(checkpointPath string, s Stdio) (Process, error) + // Exec starts another process in an existing container + Exec(string, specs.ProcessSpec, Stdio) (Process, error) + // Delete removes the container's state and any resources + Delete() error + // Processes returns all the container's processes that have been added + Processes() ([]Process, error) + // State returns the container's runtime state + State() State + // Resume resumes a paused container + Resume() error + // Pause pauses a running container + Pause() error + // 
RemoveProcess removes the specified process from the container + RemoveProcess(string) error + // Checkpoints returns all the checkpoints for a container + Checkpoints(checkpointDir string) ([]Checkpoint, error) + // Checkpoint creates a new checkpoint + Checkpoint(checkpoint Checkpoint, checkpointDir string) error + // DeleteCheckpoint deletes the checkpoint for the provided name + DeleteCheckpoint(name string, checkpointDir string) error + // Labels are user provided labels for the container + Labels() []string + // Pids returns all pids inside the container + Pids() ([]int, error) + // Stats returns realtime container stats and resource information + Stats() (*Stat, error) + // Runtime returns the name or path of the OCI compliant runtime used to execute the container + Runtime() string + // OOM signals the channel if the container received an OOM notification + OOM() (OOM, error) + // UpdateResources updates the container's resources to new values + UpdateResources(*Resource) error + + // Status returns the current status of the container. + Status() (State, error) +} + +type OOM interface { + io.Closer + FD() int + ContainerID() string + Flush() + Removed() bool +} + +type Stdio struct { + Stdin string + Stdout string + Stderr string +} + +func NewStdio(stdin, stdout, stderr string) Stdio { + for _, s := range []*string{ + &stdin, &stdout, &stderr, + } { + if *s == "" { + *s = "/dev/null" + } + } + return Stdio{ + Stdin: stdin, + Stdout: stdout, + Stderr: stderr, + } +} + +type ContainerOpts struct { + Root string + ID string + Bundle string + Runtime string + RuntimeArgs []string + Shim string + Labels []string + NoPivotRoot bool + Timeout time.Duration +} + +// New returns a new container +func New(opts ContainerOpts) (Container, error) { + c := &container{ + root: opts.Root, + id: opts.ID, + bundle: opts.Bundle, + labels: opts.Labels, + processes: make(map[string]Process), + runtime: opts.Runtime, + runtimeArgs: opts.RuntimeArgs, + shim: opts.Shim, + noPivotRoot: opts.NoPivotRoot, + timeout: opts.Timeout, + } + if err := os.Mkdir(filepath.Join(c.root, c.id), 0755); err != nil { + return nil, err + } + f, err := os.Create(filepath.Join(c.root, c.id, StateFile)) + if err != nil { + return nil, err + } + defer f.Close() + if err := json.NewEncoder(f).Encode(state{ + Bundle: c.bundle, + Labels: c.labels, + Runtime: c.runtime, + RuntimeArgs: c.runtimeArgs, + Shim: c.shim, + NoPivotRoot: opts.NoPivotRoot, + }); err != nil { + return nil, err + } + return c, nil +} + +func Load(root, id string, timeout time.Duration) (Container, error) { + var s state + f, err := os.Open(filepath.Join(root, id, StateFile)) + if err != nil { + return nil, err + } + defer f.Close() + if err := json.NewDecoder(f).Decode(&s); err != nil { + return nil, err + } + c := &container{ + root: root, + id: id, + bundle: s.Bundle, + labels: s.Labels, + runtime: s.Runtime, + runtimeArgs: s.RuntimeArgs, + shim: s.Shim, + noPivotRoot: s.NoPivotRoot, + processes: make(map[string]Process), + timeout: timeout, + } + dirs, err := ioutil.ReadDir(filepath.Join(root, id)) + if err != nil { + return nil, err + } + for _, d := range dirs { + if !d.IsDir() { + continue + } + pid := d.Name() + s, err := readProcessState(filepath.Join(root, id, pid)) + if err != nil { + return nil, err + } + p, err := loadProcess(filepath.Join(root, id, pid), pid, c, s) + if err != nil { + logrus.WithField("id", id).WithField("pid", pid).Debugf("containerd: error loading process %s", err) + continue + } + c.processes[pid] = p + } + return c, nil +} + +func 
readProcessState(dir string) (*ProcessState, error) { + f, err := os.Open(filepath.Join(dir, "process.json")) + if err != nil { + return nil, err + } + defer f.Close() + var s ProcessState + if err := json.NewDecoder(f).Decode(&s); err != nil { + return nil, err + } + return &s, nil +} + +type container struct { + // path to store runtime state information + root string + id string + bundle string + runtime string + runtimeArgs []string + shim string + processes map[string]Process + labels []string + oomFds []int + noPivotRoot bool + timeout time.Duration +} + +func (c *container) ID() string { + return c.id +} + +func (c *container) Path() string { + return c.bundle +} + +func (c *container) Labels() []string { + return c.labels +} + +func (c *container) readSpec() (*specs.Spec, error) { + var spec specs.Spec + f, err := os.Open(filepath.Join(c.bundle, "config.json")) + if err != nil { + return nil, err + } + defer f.Close() + if err := json.NewDecoder(f).Decode(&spec); err != nil { + return nil, err + } + return &spec, nil +} + +func (c *container) Delete() error { + err := os.RemoveAll(filepath.Join(c.root, c.id)) + + args := c.runtimeArgs + args = append(args, "delete", c.id) + if derr := exec.Command(c.runtime, args...).Run(); err == nil { + err = derr + } + return err +} + +func (c *container) Processes() ([]Process, error) { + out := []Process{} + for _, p := range c.processes { + out = append(out, p) + } + return out, nil +} + +func (c *container) RemoveProcess(pid string) error { + delete(c.processes, pid) + return os.RemoveAll(filepath.Join(c.root, c.id, pid)) +} + +func (c *container) State() State { + proc := c.processes["init"] + if proc == nil { + return Stopped + } + return proc.State() +} + +func (c *container) Runtime() string { + return c.runtime +} + +func (c *container) Pause() error { + args := c.runtimeArgs + args = append(args, "pause", c.id) + b, err := exec.Command(c.runtime, args...).CombinedOutput() + if err != nil { + return fmt.Errorf("%s: %q", err.Error(), string(b)) + } + return nil +} + +func (c *container) Resume() error { + args := c.runtimeArgs + args = append(args, "resume", c.id) + b, err := exec.Command(c.runtime, args...).CombinedOutput() + if err != nil { + return fmt.Errorf("%s: %q", err.Error(), string(b)) + } + return nil +} + +func (c *container) Checkpoints(checkpointDir string) ([]Checkpoint, error) { + if checkpointDir == "" { + checkpointDir = filepath.Join(c.bundle, "checkpoints") + } + + dirs, err := ioutil.ReadDir(checkpointDir) + if err != nil { + return nil, err + } + var out []Checkpoint + for _, d := range dirs { + if !d.IsDir() { + continue + } + path := filepath.Join(checkpointDir, d.Name(), "config.json") + data, err := ioutil.ReadFile(path) + if err != nil { + return nil, err + } + var cpt Checkpoint + if err := json.Unmarshal(data, &cpt); err != nil { + return nil, err + } + out = append(out, cpt) + } + return out, nil +} + +func (c *container) Checkpoint(cpt Checkpoint, checkpointDir string) error { + if checkpointDir == "" { + checkpointDir = filepath.Join(c.bundle, "checkpoints") + } + + if err := os.MkdirAll(checkpointDir, 0755); err != nil { + return err + } + + path := filepath.Join(checkpointDir, cpt.Name) + if err := os.Mkdir(path, 0755); err != nil { + return err + } + f, err := os.Create(filepath.Join(path, "config.json")) + if err != nil { + return err + } + cpt.Created = time.Now() + err = json.NewEncoder(f).Encode(cpt) + f.Close() + if err != nil { + return err + } + args := []string{ + "checkpoint", + "--image-path", 
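// the checkpoint image files are written under this directory. +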
path, + } + add := func(flags ...string) { + args = append(args, flags...) + } + add(c.runtimeArgs...) + if !cpt.Exit { + add("--leave-running") + } + if cpt.Shell { + add("--shell-job") + } + if cpt.Tcp { + add("--tcp-established") + } + if cpt.UnixSockets { + add("--ext-unix-sk") + } + add(c.id) + out, err := exec.Command(c.runtime, args...).CombinedOutput() + if err != nil { + return fmt.Errorf("%s: %q", err.Error(), string(out)) + } + return err +} + +func (c *container) DeleteCheckpoint(name string, checkpointDir string) error { + if checkpointDir == "" { + checkpointDir = filepath.Join(c.bundle, "checkpoints") + } + return os.RemoveAll(filepath.Join(checkpointDir, name)) +} + +func (c *container) Start(checkpointPath string, s Stdio) (Process, error) { + processRoot := filepath.Join(c.root, c.id, InitProcessID) + if err := os.Mkdir(processRoot, 0755); err != nil { + return nil, err + } + spec, err := c.readSpec() + if err != nil { + return nil, err + } + config := &processConfig{ + checkpoint: checkpointPath, + root: processRoot, + id: InitProcessID, + c: c, + stdio: s, + spec: spec, + processSpec: specs.ProcessSpec(spec.Process), + } + p, err := c.newProcess(config) + if err != nil { + return nil, err + } + if err := p.Start(); err != nil { + return nil, err + } + c.processes[InitProcessID] = p + return p, nil +} + +func (c *container) Exec(pid string, pspec specs.ProcessSpec, s Stdio) (pp Process, err error) { + processRoot := filepath.Join(c.root, c.id, pid) + if err := os.Mkdir(processRoot, 0755); err != nil { + return nil, err + } + defer func() { + if err != nil { + c.RemoveProcess(pid) + } + }() + spec, err := c.readSpec() + if err != nil { + return nil, err + } + config := &processConfig{ + exec: true, + id: pid, + root: processRoot, + c: c, + processSpec: pspec, + spec: spec, + stdio: s, + } + p, err := c.newProcess(config) + if err != nil { + return nil, err + } + if err := p.Start(); err != nil { + return nil, err + } + c.processes[pid] = p + return p, nil +} + +func hostIDFromMap(id uint32, mp []ocs.IDMapping) int { + for _, m := range mp { + if (id >= m.ContainerID) && (id <= (m.ContainerID + m.Size - 1)) { + return int(m.HostID + (id - m.ContainerID)) + } + } + return 0 +} + +func (c *container) Pids() ([]int, error) { + args := c.runtimeArgs + args = append(args, "ps", "--format=json", c.id) + out, err := exec.Command(c.runtime, args...).CombinedOutput() + if err != nil { + return nil, fmt.Errorf("%s: %q", err.Error(), out) + } + var pids []int + if err := json.Unmarshal(out, &pids); err != nil { + return nil, err + } + return pids, nil +} + +func (c *container) Stats() (*Stat, error) { + now := time.Now() + args := c.runtimeArgs + args = append(args, "events", "--stats", c.id) + out, err := exec.Command(c.runtime, args...).CombinedOutput() + if err != nil { + return nil, fmt.Errorf("%s: %q", err.Error(), out) + } + s := struct { + Data *Stat `json:"data"` + }{} + if err := json.Unmarshal(out, &s); err != nil { + return nil, err + } + s.Data.Timestamp = now + return s.Data, nil +} + +// Status implements the runtime Container interface. +func (c *container) Status() (State, error) { + args := c.runtimeArgs + args = append(args, "state", c.id) + + out, err := exec.Command(c.runtime, args...).CombinedOutput() + if err != nil { + return "", fmt.Errorf("%s: %q", err.Error(), out) + } + + // We only require the runtime json output to have a top level Status field. 
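+ // Anything else the runtime prints in its state output is ignored by the decode below.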
+ var s struct { + Status State `json:"status"` + } + if err := json.Unmarshal(out, &s); err != nil { + return "", err + } + return s.Status, nil +} + +func (c *container) writeEventFD(root string, cfd, efd int) error { + f, err := os.OpenFile(filepath.Join(root, "cgroup.event_control"), os.O_WRONLY, 0) + if err != nil { + return err + } + defer f.Close() + _, err = f.WriteString(fmt.Sprintf("%d %d", efd, cfd)) + return err +} + +func (c *container) newProcess(config *processConfig) (Process, error) { + if c.shim == "" { + return newDirectProcess(config) + } + return newProcess(config) +} + +type waitArgs struct { + pid int + err error +} + +// isAlive checks if the shim that launched the container is still alive +func isAlive(cmd *exec.Cmd) (bool, error) { + if err := syscall.Kill(cmd.Process.Pid, 0); err != nil { + if err == syscall.ESRCH { + return false, nil + } + return false, err + } + return true, nil +} + +type oom struct { + id string + root string + control *os.File + eventfd int +} + +func (o *oom) ContainerID() string { + return o.id +} + +func (o *oom) FD() int { + return o.eventfd +} + +func (o *oom) Flush() { + buf := make([]byte, 8) + syscall.Read(o.eventfd, buf) +} + +func (o *oom) Removed() bool { + _, err := os.Lstat(filepath.Join(o.root, "cgroup.event_control")) + return os.IsNotExist(err) +} + +func (o *oom) Close() error { + err := syscall.Close(o.eventfd) + if cerr := o.control.Close(); err == nil { + err = cerr + } + return err +} + +type message struct { + Level string `json:"level"` + Msg string `json:"msg"` +} + +func readLogMessages(path string) ([]message, error) { + var out []message + f, err := os.Open(path) + if err != nil { + return nil, err + } + defer f.Close() + dec := json.NewDecoder(f) + for { + var m message + if err := dec.Decode(&m); err != nil { + if err == io.EOF { + break + } + return nil, err + } + out = append(out, m) + } + return out, nil +} diff --git a/vendor/github.com/docker/containerd/runtime/container_linux.go b/vendor/github.com/docker/containerd/runtime/container_linux.go new file mode 100644 index 00000000..9e2e6d6b --- /dev/null +++ b/vendor/github.com/docker/containerd/runtime/container_linux.go @@ -0,0 +1,134 @@ +package runtime + +import ( + "bytes" + "encoding/json" + "fmt" + "os" + "path/filepath" + "strings" + "syscall" + + "github.com/docker/containerd/specs" + "github.com/docker/containerd/subreaper/exec" + "github.com/opencontainers/runc/libcontainer" + ocs "github.com/opencontainers/runtime-spec/specs-go" +) + +func (c *container) getLibctContainer() (libcontainer.Container, error) { + runtimeRoot := "/run/runc" + + // Check that the root wasn't changed + for _, opt := range c.runtimeArgs { + if strings.HasPrefix(opt, "--root=") { + runtimeRoot = strings.TrimPrefix(opt, "--root=") + break + } + } + + f, err := libcontainer.New(runtimeRoot, libcontainer.Cgroupfs) + if err != nil { + return nil, err + } + return f.Load(c.id) +} + +func (c *container) OOM() (OOM, error) { + container, err := c.getLibctContainer() + if err != nil { + if lerr, ok := err.(libcontainer.Error); ok { + // with oom registration sometimes the container can run, exit, and be destroyed + // faster than we can get the state back so we can just ignore this + if lerr.Code() == libcontainer.ContainerNotExists { + return nil, ErrContainerExited + } + } + return nil, err + } + state, err := container.State() + if err != nil { + return nil, err + } + memoryPath := state.CgroupPaths["memory"] + return c.getMemeoryEventFD(memoryPath) +} + +func u64Ptr(i uint64) 
*uint64 { return &i } + +func (c *container) UpdateResources(r *Resource) error { + sr := ocs.Resources{ + Memory: &ocs.Memory{ + Limit: u64Ptr(uint64(r.Memory)), + Reservation: u64Ptr(uint64(r.MemoryReservation)), + Swap: u64Ptr(uint64(r.MemorySwap)), + Kernel: u64Ptr(uint64(r.KernelMemory)), + }, + CPU: &ocs.CPU{ + Shares: u64Ptr(uint64(r.CPUShares)), + Quota: u64Ptr(uint64(r.CPUQuota)), + Period: u64Ptr(uint64(r.CPUPeriod)), + Cpus: &r.CpusetCpus, + Mems: &r.CpusetMems, + }, + BlockIO: &ocs.BlockIO{ + Weight: &r.BlkioWeight, + }, + } + + srStr := bytes.NewBuffer(nil) + if err := json.NewEncoder(srStr).Encode(&sr); err != nil { + return err + } + + args := c.runtimeArgs + args = append(args, "update", "-r", "-", c.id) + cmd := exec.Command(c.runtime, args...) + cmd.Stdin = srStr + b, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf(string(b)) + } + return nil +} + +func getRootIDs(s *specs.Spec) (int, int, error) { + if s == nil { + return 0, 0, nil + } + var hasUserns bool + for _, ns := range s.Linux.Namespaces { + if ns.Type == ocs.UserNamespace { + hasUserns = true + break + } + } + if !hasUserns { + return 0, 0, nil + } + uid := hostIDFromMap(0, s.Linux.UIDMappings) + gid := hostIDFromMap(0, s.Linux.GIDMappings) + return uid, gid, nil +} + +func (c *container) getMemeoryEventFD(root string) (*oom, error) { + f, err := os.Open(filepath.Join(root, "memory.oom_control")) + if err != nil { + return nil, err + } + fd, _, serr := syscall.RawSyscall(syscall.SYS_EVENTFD2, 0, syscall.FD_CLOEXEC, 0) + if serr != 0 { + f.Close() + return nil, serr + } + if err := c.writeEventFD(root, int(f.Fd()), int(fd)); err != nil { + syscall.Close(int(fd)) + f.Close() + return nil, err + } + return &oom{ + root: root, + id: c.id, + eventfd: int(fd), + control: f, + }, nil +} diff --git a/vendor/github.com/docker/containerd/runtime/container_solaris.go b/vendor/github.com/docker/containerd/runtime/container_solaris.go new file mode 100644 index 00000000..4d0d8b4a --- /dev/null +++ b/vendor/github.com/docker/containerd/runtime/container_solaris.go @@ -0,0 +1,19 @@ +package runtime + +import ( + "errors" + + "github.com/docker/containerd/specs" +) + +func (c *container) OOM() (OOM, error) { + return nil, errors.New("runtime OOM() not implemented on Solaris") +} + +func (c *container) UpdateResources(r *Resource) error { + return errors.New("runtime UpdateResources() not implemented on Solaris") +} + +func getRootIDs(s *specs.Spec) (int, int, error) { + return 0, 0, errors.New("runtime getRootIDs() not implemented on Solaris") +} diff --git a/vendor/github.com/docker/containerd/runtime/direct_process.go b/vendor/github.com/docker/containerd/runtime/direct_process.go new file mode 100644 index 00000000..fe8ee894 --- /dev/null +++ b/vendor/github.com/docker/containerd/runtime/direct_process.go @@ -0,0 +1,283 @@ +package runtime + +import ( + "encoding/json" + "fmt" + "io" + "os" + "path" + "path/filepath" + "sync" + "syscall" + + "github.com/docker/containerd/specs" + "github.com/docker/containerd/subreaper" + "github.com/docker/containerd/subreaper/exec" + "github.com/docker/docker/pkg/term" + "github.com/opencontainers/runc/libcontainer" +) + +type directProcess struct { + *process + sync.WaitGroup + + io stdio + console libcontainer.Console + consolePath string + exec bool + checkpoint string + specs *specs.Spec +} + +func newDirectProcess(config *processConfig) (*directProcess, error) { + lp, err := newProcess(config) + if err != nil { + return nil, err + } + + return &directProcess{ + 
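// A direct process drives the OCI runtime itself rather than delegating to a shim. +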
specs: config.spec, + process: lp, + exec: config.exec, + checkpoint: config.checkpoint, + }, nil +} + +func (d *directProcess) CloseStdin() error { + if d.io.stdin != nil { + return d.io.stdin.Close() + } + return nil +} + +func (d *directProcess) Resize(w, h int) error { + if d.console == nil { + return nil + } + ws := term.Winsize{ + Width: uint16(w), + Height: uint16(h), + } + return term.SetWinsize(d.console.Fd(), &ws) +} + +func (d *directProcess) openIO() (*os.File, *os.File, *os.File, error) { + uid, gid, err := getRootIDs(d.specs) + if err != nil { + return nil, nil, nil, err + } + + if d.spec.Terminal { + console, err := libcontainer.NewConsole(uid, gid) + if err != nil { + return nil, nil, nil, err + } + d.console = console + d.consolePath = console.Path() + stdin, err := os.OpenFile(d.stdio.Stdin, syscall.O_RDONLY, 0) + if err != nil { + return nil, nil, nil, err + } + go io.Copy(console, stdin) + stdout, err := os.OpenFile(d.stdio.Stdout, syscall.O_RDWR, 0) + if err != nil { + return nil, nil, nil, err + } + d.Add(1) + go func() { + io.Copy(stdout, console) + console.Close() + d.Done() + }() + d.io.stdin = stdin + d.io.stdout = stdout + d.io.stderr = stdout + return nil, nil, nil, nil + } + + stdin, err := os.OpenFile(d.stdio.Stdin, syscall.O_RDONLY|syscall.O_NONBLOCK, 0) + if err != nil { + return nil, nil, nil, err + } + + stdout, err := os.OpenFile(d.stdio.Stdout, syscall.O_RDWR, 0) + if err != nil { + return nil, nil, nil, err + } + + stderr, err := os.OpenFile(d.stdio.Stderr, syscall.O_RDWR, 0) + if err != nil { + return nil, nil, nil, err + } + + d.io.stdin = stdin + d.io.stdout = stdout + d.io.stderr = stderr + return stdin, stdout, stderr, nil +} + +func (d *directProcess) loadCheckpoint(bundle string) (*Checkpoint, error) { + if d.checkpoint == "" { + return nil, nil + } + + f, err := os.Open(filepath.Join(bundle, "checkpoints", d.checkpoint, "config.json")) + if err != nil { + return nil, err + } + defer f.Close() + var cpt Checkpoint + if err := json.NewDecoder(f).Decode(&cpt); err != nil { + return nil, err + } + return &cpt, nil +} + +func (d *directProcess) Start() error { + cwd, err := filepath.Abs(d.root) + if err != nil { + return err + } + + stdin, stdout, stderr, err := d.openIO() + if err != nil { + return err + } + + checkpoint, err := d.loadCheckpoint(d.container.bundle) + if err != nil { + return err + } + + logPath := filepath.Join(cwd, "log.json") + args := append([]string{ + "--log", logPath, + "--log-format", "json", + }, d.container.runtimeArgs...) + if d.exec { + args = append(args, "exec", + "--process", filepath.Join(cwd, "process.json"), + "--console", d.consolePath, + ) + } else if checkpoint != nil { + args = append(args, "restore", + "--image-path", filepath.Join(d.container.bundle, "checkpoints", checkpoint.Name), + ) + add := func(flags ...string) { + args = append(args, flags...) + } + if checkpoint.Shell { + add("--shell-job") + } + if checkpoint.Tcp { + add("--tcp-established") + } + if checkpoint.UnixSockets { + add("--ext-unix-sk") + } + if d.container.noPivotRoot { + add("--no-pivot") + } + } else { + args = append(args, "start", + "--bundle", d.container.bundle, + "--console", d.consolePath, + ) + if d.container.noPivotRoot { + args = append(args, "--no-pivot") + } + } + args = append(args, + "-d", + "--pid-file", filepath.Join(cwd, "pid"), + d.container.id, + ) + cmd := exec.Command(d.container.runtime, args...) 
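+ // The runtime detaches (-d above); with no shim in between, stdio and the working directory are wired onto the command itself.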
+ cmd.Dir = d.container.bundle + cmd.Stdin = stdin + cmd.Stdout = stdout + cmd.Stderr = stderr + // set the parent death signal to SIGKILL so that if containerd dies the container + // process also dies + cmd.SysProcAttr = &syscall.SysProcAttr{ + Pdeathsig: syscall.SIGKILL, + } + + exitSubscription := subreaper.Subscribe() + err = d.startCmd(cmd) + if err != nil { + subreaper.Unsubscribe(exitSubscription) + d.delete() + return err + } + + go d.watch(cmd, exitSubscription) + + return nil +} + +func (d *directProcess) watch(cmd *exec.Cmd, exitSubscription *subreaper.Subscription) { + defer subreaper.Unsubscribe(exitSubscription) + defer d.delete() + + f, err := os.OpenFile(path.Join(d.root, ExitFile), syscall.O_WRONLY, 0) + if err == nil { + defer f.Close() + } + + exitCode := 0 + if err = cmd.Wait(); err != nil { + if exitError, ok := err.(exec.ExitCodeError); ok { + exitCode = exitError.Code + } + } + + if exitCode == 0 { + pid, err := d.getPidFromFile() + if err != nil { + return + } + exitSubscription.SetPid(pid) + exitCode = exitSubscription.Wait() + } + + writeInt(path.Join(d.root, ExitStatusFile), exitCode) +} + +func (d *directProcess) delete() { + if d.console != nil { + d.console.Close() + } + d.io.Close() + d.Wait() + if !d.exec { + exec.Command(d.container.runtime, append(d.container.runtimeArgs, "delete", d.container.id)...).Run() + } +} + +func writeInt(path string, i int) error { + f, err := os.Create(path) + if err != nil { + return err + } + defer f.Close() + _, err = fmt.Fprintf(f, "%d", i) + return err +} + +type stdio struct { + stdin *os.File + stdout *os.File + stderr *os.File +} + +func (s stdio) Close() error { + err := s.stdin.Close() + if oerr := s.stdout.Close(); err == nil { + err = oerr + } + if oerr := s.stderr.Close(); err == nil { + err = oerr + } + return err +} diff --git a/vendor/github.com/docker/containerd/runtime/process.go b/vendor/github.com/docker/containerd/runtime/process.go new file mode 100644 index 00000000..0e8738d7 --- /dev/null +++ b/vendor/github.com/docker/containerd/runtime/process.go @@ -0,0 +1,340 @@ +package runtime + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "syscall" + "time" + + "github.com/docker/containerd/specs" + "github.com/docker/containerd/subreaper/exec" + "golang.org/x/sys/unix" +) + +type Process interface { + io.Closer + + // ID of the process. 
+ // This is either "init" when it is the container's init process or + // it is a user provided id for the process, similar to the container id + ID() string + CloseStdin() error + Resize(int, int) error + // ExitFD returns the fd that provides an event when the process exits + ExitFD() int + // ExitStatus returns the exit status of the process or an error if it + // has not exited + ExitStatus() (int, error) + // Spec returns the process spec that created the process + Spec() specs.ProcessSpec + // Signal sends the provided signal to the process + Signal(os.Signal) error + // Container returns the container that the process belongs to + Container() Container + // Stdio of the container + Stdio() Stdio + // SystemPid is the pid on the system + SystemPid() int + // State returns whether the process is running or not + State() State + // Start executes the process + Start() error +} + +type processConfig struct { + id string + root string + processSpec specs.ProcessSpec + spec *specs.Spec + c *container + stdio Stdio + exec bool + checkpoint string +} + +func newProcess(config *processConfig) (*process, error) { + p := &process{ + root: config.root, + id: config.id, + container: config.c, + spec: config.processSpec, + stdio: config.stdio, + } + uid, gid, err := getRootIDs(config.spec) + if err != nil { + return nil, err + } + f, err := os.Create(filepath.Join(config.root, "process.json")) + if err != nil { + return nil, err + } + defer f.Close() + + ps := ProcessState{ + ProcessSpec: config.processSpec, + Exec: config.exec, + PlatformProcessState: PlatformProcessState{ + Checkpoint: config.checkpoint, + RootUID: uid, + RootGID: gid, + }, + Stdin: config.stdio.Stdin, + Stdout: config.stdio.Stdout, + Stderr: config.stdio.Stderr, + RuntimeArgs: config.c.runtimeArgs, + NoPivotRoot: config.c.noPivotRoot, + } + + if err := json.NewEncoder(f).Encode(ps); err != nil { + return nil, err + } + exit, err := getExitPipe(filepath.Join(config.root, ExitFile)) + if err != nil { + return nil, err + } + control, err := getControlPipe(filepath.Join(config.root, ControlFile)) + if err != nil { + return nil, err + } + p.exitPipe = exit + p.controlPipe = control + return p, nil +} + +func (p *process) Start() error { + cmd := exec.Command(p.container.shim, + p.container.id, p.container.bundle, p.container.runtime, + ) + cmd.Dir = p.root + cmd.SysProcAttr = &syscall.SysProcAttr{ + Setpgid: true, + } + + return p.startCmd(cmd) +} + +func loadProcess(root, id string, c *container, s *ProcessState) (*process, error) { + p := &process{ + root: root, + id: id, + container: c, + spec: s.ProcessSpec, + stdio: Stdio{ + Stdin: s.Stdin, + Stdout: s.Stdout, + Stderr: s.Stderr, + }, + } + if _, err := p.getPidFromFile(); err != nil { + return nil, err + } + if _, err := p.ExitStatus(); err != nil { + if err == ErrProcessNotExited { + exit, err := getExitPipe(filepath.Join(root, ExitFile)) + if err != nil { + return nil, err + } + p.exitPipe = exit + return p, nil + } + return nil, err + } + return p, nil +} + +type process struct { + root string + id string + pid int + exitPipe *os.File + controlPipe *os.File + container *container + spec specs.ProcessSpec + stdio Stdio +} + +func (p *process) ID() string { + return p.id +} + +func (p *process) Container() Container { + return p.container +} + +func (p *process) SystemPid() int { + return p.pid +} + +// ExitFD returns the fd of the exit pipe +func (p *process) ExitFD() int { + return int(p.exitPipe.Fd()) +} + +func (p *process) CloseStdin() error { + _, err := 
fmt.Fprintf(p.controlPipe, "%d %d %d\n", 0, 0, 0) + return err +} + +func (p *process) Resize(w, h int) error { + _, err := fmt.Fprintf(p.controlPipe, "%d %d %d\n", 1, w, h) + return err +} + +func (p *process) ExitStatus() (int, error) { + data, err := ioutil.ReadFile(filepath.Join(p.root, ExitStatusFile)) + if err != nil { + if os.IsNotExist(err) { + return -1, ErrProcessNotExited + } + return -1, err + } + if len(data) == 0 { + return -1, ErrProcessNotExited + } + return strconv.Atoi(string(data)) +} + +func (p *process) Spec() specs.ProcessSpec { + return p.spec +} + +func (p *process) Stdio() Stdio { + return p.stdio +} + +// Close closes any open files and/or resources on the process +func (p *process) Close() error { + return p.exitPipe.Close() +} + +func (p *process) State() State { + if p.pid == 0 { + return Stopped + } + err := syscall.Kill(p.pid, 0) + if err != nil && err == syscall.ESRCH { + return Stopped + } + return Running +} + +func (p *process) getPidFromFile() (int, error) { + data, err := ioutil.ReadFile(filepath.Join(p.root, "pid")) + if err != nil { + return -1, err + } + i, err := strconv.Atoi(string(data)) + if err != nil { + return -1, errInvalidPidInt + } + p.pid = i + return i, nil +} + +func getExitPipe(path string) (*os.File, error) { + if err := unix.Mkfifo(path, 0755); err != nil && !os.IsExist(err) { + return nil, err + } + // add NONBLOCK in case the other side has already closed or else + // this function would never return + return os.OpenFile(path, syscall.O_RDONLY|syscall.O_NONBLOCK, 0) +} + +func getControlPipe(path string) (*os.File, error) { + if err := unix.Mkfifo(path, 0755); err != nil && !os.IsExist(err) { + return nil, err + } + return os.OpenFile(path, syscall.O_RDWR|syscall.O_NONBLOCK, 0) +} + +// Signal sends the provided signal to the process +func (p *process) Signal(s os.Signal) error { + return syscall.Kill(p.pid, s.(syscall.Signal)) +} + +func (p *process) startCmd(cmd *exec.Cmd) error { + if err := cmd.Start(); err != nil { + if exErr, ok := err.(*exec.Error); ok { + if exErr.Err == exec.ErrNotFound || exErr.Err == os.ErrNotExist { + return fmt.Errorf("%s not installed on system", p.container.shim) + } + } + return err + } + if err := p.waitForStart(cmd); err != nil { + return err + } + return nil +} + +func (p *process) waitForStart(cmd *exec.Cmd) error { + wc := make(chan error, 1) + go func() { + for { + if _, err := p.getPidFromFile(); err != nil { + if os.IsNotExist(err) || err == errInvalidPidInt { + alive, err := isAlive(cmd) + if err != nil { + wc <- err + return + } + if !alive { + // runc could have failed to run the container so let's get the error + // out of the logs or the shim could have encountered an error + messages, err := readLogMessages(filepath.Join(p.root, "shim-log.json")) + if err != nil && !os.IsNotExist(err) { + wc <- err + return + } + for _, m := range messages { + if m.Level == "error" { + wc <- fmt.Errorf("shim error: %v", m.Msg) + return + } + } + // no errors reported back from shim, check for runc/runtime errors + messages, err = readLogMessages(filepath.Join(p.root, "log.json")) + if err != nil { + if os.IsNotExist(err) { + err = ErrContainerNotStarted + } + wc <- err + return + } + for _, m := range messages { + if m.Level == "error" { + wc <- fmt.Errorf("oci runtime error: %v", m.Msg) + return + } + } + wc <- ErrContainerNotStarted + return + } + time.Sleep(15 * time.Millisecond) + continue + } + wc <- err + return + } + // the pid file was read successfully + wc <- nil + return + } + }() + select { + 
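// Either the polling goroutine above reports a result or the start timeout fires. +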
case err := <-wc: + if err != nil { + return err + } + return nil + case <-time.After(p.container.timeout): + cmd.Process.Kill() + cmd.Wait() + return ErrContainerStartTimeout + } +} diff --git a/vendor/github.com/docker/containerd/runtime/runtime.go b/vendor/github.com/docker/containerd/runtime/runtime.go new file mode 100644 index 00000000..ff2094d4 --- /dev/null +++ b/vendor/github.com/docker/containerd/runtime/runtime.go @@ -0,0 +1,99 @@ +package runtime + +import ( + "errors" + "time" + + "github.com/docker/containerd/specs" +) + +var ( + ErrNotChildProcess = errors.New("containerd: not a child process for container") + ErrInvalidContainerType = errors.New("containerd: invalid container type for runtime") + ErrCheckpointNotExists = errors.New("containerd: checkpoint does not exist for container") + ErrCheckpointExists = errors.New("containerd: checkpoint already exists") + ErrContainerExited = errors.New("containerd: container has exited") + ErrTerminalsNotSupported = errors.New("containerd: terminals are not supported for runtime") + ErrProcessNotExited = errors.New("containerd: process has not exited") + ErrProcessExited = errors.New("containerd: process has exited") + ErrContainerNotStarted = errors.New("containerd: container not started") + ErrContainerStartTimeout = errors.New("containerd: container did not start before the specified timeout") + + errNoPidFile = errors.New("containerd: no process pid file found") + errInvalidPidInt = errors.New("containerd: process pid is invalid") + errNotImplemented = errors.New("containerd: not implemented") +) + +const ( + ExitFile = "exit" + ExitStatusFile = "exitStatus" + StateFile = "state.json" + ControlFile = "control" + InitProcessID = "init" +) + +type Checkpoint struct { + // Timestamp is the time that the checkpoint happened + Created time.Time `json:"created"` + // Name is the name of the checkpoint + Name string `json:"name"` + // Tcp checkpoints open tcp connections + Tcp bool `json:"tcp"` + // UnixSockets persists unix sockets in the checkpoint + UnixSockets bool `json:"unixSockets"` + // Shell persists tty sessions in the checkpoint + Shell bool `json:"shell"` + // Exit exits the container after the checkpoint is finished + Exit bool `json:"exit"` +} + +// PlatformProcessState contains platform-specific fields in the ProcessState structure +type PlatformProcessState struct { + Checkpoint string `json:"checkpoint"` + RootUID int `json:"rootUID"` + RootGID int `json:"rootGID"` +} +type State string + +type Resource struct { + CPUShares int64 + BlkioWeight uint16 + CPUPeriod int64 + CPUQuota int64 + CpusetCpus string + CpusetMems string + KernelMemory int64 + Memory int64 + MemoryReservation int64 + MemorySwap int64 +} + +const ( + Paused = State("paused") + Stopped = State("stopped") + Running = State("running") +) + +type state struct { + Bundle string `json:"bundle"` + Labels []string `json:"labels"` + Stdin string `json:"stdin"` + Stdout string `json:"stdout"` + Stderr string `json:"stderr"` + Runtime string `json:"runtime"` + RuntimeArgs []string `json:"runtimeArgs"` + Shim string `json:"shim"` + NoPivotRoot bool `json:"noPivotRoot"` +} + +type ProcessState struct { + specs.ProcessSpec + Exec bool `json:"exec"` + Stdin string `json:"containerdStdin"` + Stdout string `json:"containerdStdout"` + Stderr string `json:"containerdStderr"` + RuntimeArgs []string `json:"runtimeArgs"` + NoPivotRoot bool `json:"noPivotRoot"` + + PlatformProcessState +} diff --git a/vendor/github.com/docker/containerd/runtime/stats.go
b/vendor/github.com/docker/containerd/runtime/stats.go new file mode 100644 index 00000000..de160bf6 --- /dev/null +++ b/vendor/github.com/docker/containerd/runtime/stats.go @@ -0,0 +1,77 @@ +package runtime + +import "time" + +type Stat struct { + // Timestamp is the time that the statistics were collected + Timestamp time.Time + Cpu Cpu `json:"cpu"` + Memory Memory `json:"memory"` + Pids Pids `json:"pids"` + Blkio Blkio `json:"blkio"` + Hugetlb map[string]Hugetlb `json:"hugetlb"` +} + +type Hugetlb struct { + Usage uint64 `json:"usage,omitempty"` + Max uint64 `json:"max,omitempty"` + Failcnt uint64 `json:"failcnt"` +} + +type BlkioEntry struct { + Major uint64 `json:"major,omitempty"` + Minor uint64 `json:"minor,omitempty"` + Op string `json:"op,omitempty"` + Value uint64 `json:"value,omitempty"` +} + +type Blkio struct { + IoServiceBytesRecursive []BlkioEntry `json:"ioServiceBytesRecursive,omitempty"` + IoServicedRecursive []BlkioEntry `json:"ioServicedRecursive,omitempty"` + IoQueuedRecursive []BlkioEntry `json:"ioQueueRecursive,omitempty"` + IoServiceTimeRecursive []BlkioEntry `json:"ioServiceTimeRecursive,omitempty"` + IoWaitTimeRecursive []BlkioEntry `json:"ioWaitTimeRecursive,omitempty"` + IoMergedRecursive []BlkioEntry `json:"ioMergedRecursive,omitempty"` + IoTimeRecursive []BlkioEntry `json:"ioTimeRecursive,omitempty"` + SectorsRecursive []BlkioEntry `json:"sectorsRecursive,omitempty"` +} + +type Pids struct { + Current uint64 `json:"current,omitempty"` + Limit uint64 `json:"limit,omitempty"` +} + +type Throttling struct { + Periods uint64 `json:"periods,omitempty"` + ThrottledPeriods uint64 `json:"throttledPeriods,omitempty"` + ThrottledTime uint64 `json:"throttledTime,omitempty"` +} + +type CpuUsage struct { + // Units: nanoseconds. + Total uint64 `json:"total,omitempty"` + Percpu []uint64 `json:"percpu,omitempty"` + Kernel uint64 `json:"kernel"` + User uint64 `json:"user"` +} + +type Cpu struct { + Usage CpuUsage `json:"usage,omitempty"` + Throttling Throttling `json:"throttling,omitempty"` +} + +type MemoryEntry struct { + Limit uint64 `json:"limit"` + Usage uint64 `json:"usage,omitempty"` + Max uint64 `json:"max,omitempty"` + Failcnt uint64 `json:"failcnt"` +} + +type Memory struct { + Cache uint64 `json:"cache,omitempty"` + Usage MemoryEntry `json:"usage,omitempty"` + Swap MemoryEntry `json:"swap,omitempty"` + Kernel MemoryEntry `json:"kernel,omitempty"` + KernelTCP MemoryEntry `json:"kernelTCP,omitempty"` + Raw map[string]uint64 `json:"raw,omitempty"` +} diff --git a/vendor/github.com/docker/containerd/specs/spec_linux.go b/vendor/github.com/docker/containerd/specs/spec_linux.go new file mode 100644 index 00000000..205f1c81 --- /dev/null +++ b/vendor/github.com/docker/containerd/specs/spec_linux.go @@ -0,0 +1,9 @@ +package specs + +import ocs "github.com/opencontainers/runtime-spec/specs-go" + +type ( + ProcessSpec ocs.Process + Spec ocs.Spec + Rlimit ocs.Rlimit +) diff --git a/vendor/github.com/docker/containerd/specs/spec_solaris.go b/vendor/github.com/docker/containerd/specs/spec_solaris.go new file mode 100644 index 00000000..625d27f1 --- /dev/null +++ b/vendor/github.com/docker/containerd/specs/spec_solaris.go @@ -0,0 +1,8 @@ +package specs + +import ocs "github.com/opencontainers/specs/specs-go" + +type ( + ProcessSpec ocs.Process + Spec ocs.Spec +) diff --git a/vendor/github.com/docker/containerd/supervisor/add_process.go b/vendor/github.com/docker/containerd/supervisor/add_process.go new file mode 100644 index 00000000..93678ddc --- /dev/null +++
b/vendor/github.com/docker/containerd/supervisor/add_process.go @@ -0,0 +1,43 @@ +package supervisor + +import ( + "time" + + "github.com/docker/containerd/runtime" + "github.com/docker/containerd/specs" +) + +type AddProcessTask struct { + baseTask + ID string + PID string + Stdout string + Stderr string + Stdin string + ProcessSpec *specs.ProcessSpec + StartResponse chan StartResponse +} + +func (s *Supervisor) addProcess(t *AddProcessTask) error { + start := time.Now() + ci, ok := s.containers[t.ID] + if !ok { + return ErrContainerNotFound + } + process, err := ci.container.Exec(t.PID, *t.ProcessSpec, runtime.NewStdio(t.Stdin, t.Stdout, t.Stderr)) + if err != nil { + return err + } + if err := s.monitorProcess(process); err != nil { + return err + } + ExecProcessTimer.UpdateSince(start) + t.StartResponse <- StartResponse{} + s.notifySubscribers(Event{ + Timestamp: time.Now(), + Type: StateStartProcess, + PID: t.PID, + ID: t.ID, + }) + return nil +} diff --git a/vendor/github.com/docker/containerd/supervisor/checkpoint.go b/vendor/github.com/docker/containerd/supervisor/checkpoint.go new file mode 100644 index 00000000..fb201f5c --- /dev/null +++ b/vendor/github.com/docker/containerd/supervisor/checkpoint.go @@ -0,0 +1,35 @@ +// +build !windows + +package supervisor + +import "github.com/docker/containerd/runtime" + +type CreateCheckpointTask struct { + baseTask + ID string + CheckpointDir string + Checkpoint *runtime.Checkpoint +} + +func (s *Supervisor) createCheckpoint(t *CreateCheckpointTask) error { + i, ok := s.containers[t.ID] + if !ok { + return ErrContainerNotFound + } + return i.container.Checkpoint(*t.Checkpoint, t.CheckpointDir) +} + +type DeleteCheckpointTask struct { + baseTask + ID string + CheckpointDir string + Checkpoint *runtime.Checkpoint +} + +func (s *Supervisor) deleteCheckpoint(t *DeleteCheckpointTask) error { + i, ok := s.containers[t.ID] + if !ok { + return ErrContainerNotFound + } + return i.container.DeleteCheckpoint(t.Checkpoint.Name, t.CheckpointDir) +} diff --git a/vendor/github.com/docker/containerd/supervisor/create.go b/vendor/github.com/docker/containerd/supervisor/create.go new file mode 100644 index 00000000..12b1cd75 --- /dev/null +++ b/vendor/github.com/docker/containerd/supervisor/create.go @@ -0,0 +1,67 @@ +package supervisor + +import ( + "path/filepath" + "time" + + "github.com/docker/containerd/runtime" +) + +type StartTask struct { + baseTask + ID string + BundlePath string + Stdout string + Stderr string + Stdin string + StartResponse chan StartResponse + Labels []string + NoPivotRoot bool + Checkpoint *runtime.Checkpoint + CheckpointDir string + Runtime string + RuntimeArgs []string +} + +func (s *Supervisor) start(t *StartTask) error { + start := time.Now() + rt := s.runtime + rtArgs := s.runtimeArgs + if t.Runtime != "" { + rt = t.Runtime + rtArgs = t.RuntimeArgs + } + container, err := runtime.New(runtime.ContainerOpts{ + Root: s.stateDir, + ID: t.ID, + Bundle: t.BundlePath, + Runtime: rt, + RuntimeArgs: rtArgs, + Shim: s.shim, + Labels: t.Labels, + NoPivotRoot: t.NoPivotRoot, + Timeout: s.timeout, + }) + if err != nil { + return err + } + s.containers[t.ID] = &containerInfo{ + container: container, + } + ContainersCounter.Inc(1) + task := &startTask{ + Err: t.ErrorCh(), + Container: container, + StartResponse: t.StartResponse, + Stdin: t.Stdin, + Stdout: t.Stdout, + Stderr: t.Stderr, + } + if t.Checkpoint != nil { + task.CheckpointPath = filepath.Join(t.CheckpointDir, t.Checkpoint.Name) + } + + s.startTasks <- task + 
ContainerCreateTimer.UpdateSince(start) + return errDeferredResponse +} diff --git a/vendor/github.com/docker/containerd/supervisor/create_solaris.go b/vendor/github.com/docker/containerd/supervisor/create_solaris.go new file mode 100644 index 00000000..444e55bc --- /dev/null +++ b/vendor/github.com/docker/containerd/supervisor/create_solaris.go @@ -0,0 +1,8 @@ +package supervisor + +type platformStartTask struct { +} + +// Checkpoint not supported on Solaris +func (task *startTask) setTaskCheckpoint(t *StartTask) { +} diff --git a/vendor/github.com/docker/containerd/supervisor/delete.go b/vendor/github.com/docker/containerd/supervisor/delete.go new file mode 100644 index 00000000..fd46f38b --- /dev/null +++ b/vendor/github.com/docker/containerd/supervisor/delete.go @@ -0,0 +1,42 @@ +package supervisor + +import ( + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/containerd/runtime" +) + +type DeleteTask struct { + baseTask + ID string + Status int + PID string + NoEvent bool +} + +func (s *Supervisor) delete(t *DeleteTask) error { + if i, ok := s.containers[t.ID]; ok { + start := time.Now() + if err := s.deleteContainer(i.container); err != nil { + logrus.WithField("error", err).Error("containerd: deleting container") + } + if !t.NoEvent { + s.notifySubscribers(Event{ + Type: StateExit, + Timestamp: time.Now(), + ID: t.ID, + Status: t.Status, + PID: t.PID, + }) + } + ContainersCounter.Dec(1) + ContainerDeleteTimer.UpdateSince(start) + } + return nil +} + +func (s *Supervisor) deleteContainer(container runtime.Container) error { + delete(s.containers, container.ID()) + return container.Delete() +} diff --git a/vendor/github.com/docker/containerd/supervisor/errors.go b/vendor/github.com/docker/containerd/supervisor/errors.go new file mode 100644 index 00000000..3a996272 --- /dev/null +++ b/vendor/github.com/docker/containerd/supervisor/errors.go @@ -0,0 +1,24 @@ +package supervisor + +import "errors" + +var ( + // External errors + ErrTaskChanNil = errors.New("containerd: task channel is nil") + ErrBundleNotFound = errors.New("containerd: bundle not found") + ErrContainerNotFound = errors.New("containerd: container not found") + ErrContainerExists = errors.New("containerd: container already exists") + ErrProcessNotFound = errors.New("containerd: process not found for container") + ErrUnknownContainerStatus = errors.New("containerd: unknown container status ") + ErrUnknownTask = errors.New("containerd: unknown task type") + + // Internal errors + errShutdown = errors.New("containerd: supervisor is shutdown") + errRootNotAbs = errors.New("containerd: rootfs path is not an absolute path") + errNoContainerForPid = errors.New("containerd: pid not registered for any container") + // internal error where the handler will defer to another for the final response + // + // TODO: we could probably do a typed error with another error channel for this to make it + // less like magic + errDeferredResponse = errors.New("containerd: deferred response") +) diff --git a/vendor/github.com/docker/containerd/supervisor/exit.go b/vendor/github.com/docker/containerd/supervisor/exit.go new file mode 100644 index 00000000..8f19eee6 --- /dev/null +++ b/vendor/github.com/docker/containerd/supervisor/exit.go @@ -0,0 +1,81 @@ +package supervisor + +import ( + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/containerd/runtime" +) + +type ExitTask struct { + baseTask + Process runtime.Process +} + +func (s *Supervisor) exit(t *ExitTask) error { + start := time.Now() + proc := t.Process + status, 
err := proc.ExitStatus() + if err != nil { + logrus.WithFields(logrus.Fields{ + "error": err, + "pid": proc.ID(), + "id": proc.Container().ID(), + "systemPid": proc.SystemPid(), + }).Error("containerd: get exit status") + } + logrus.WithFields(logrus.Fields{ + "pid": proc.ID(), + "status": status, + "id": proc.Container().ID(), + "systemPid": proc.SystemPid(), + }).Debug("containerd: process exited") + + // if the process is not the init process of the container then + // fire a separate event for this process + if proc.ID() != runtime.InitProcessID { + ne := &ExecExitTask{ + ID: proc.Container().ID(), + PID: proc.ID(), + Status: status, + Process: proc, + } + s.SendTask(ne) + return nil + } + container := proc.Container() + ne := &DeleteTask{ + ID: container.ID(), + Status: status, + PID: proc.ID(), + } + s.SendTask(ne) + + ExitProcessTimer.UpdateSince(start) + + return nil +} + +type ExecExitTask struct { + baseTask + ID string + PID string + Status int + Process runtime.Process +} + +func (s *Supervisor) execExit(t *ExecExitTask) error { + container := t.Process.Container() + // exec process: we remove this process without notifying the main event loop + if err := container.RemoveProcess(t.PID); err != nil { + logrus.WithField("error", err).Error("containerd: find container for pid") + } + s.notifySubscribers(Event{ + Timestamp: time.Now(), + ID: t.ID, + Type: StateExit, + PID: t.PID, + Status: t.Status, + }) + return nil +} diff --git a/vendor/github.com/docker/containerd/supervisor/get_containers.go b/vendor/github.com/docker/containerd/supervisor/get_containers.go new file mode 100644 index 00000000..23f49c78 --- /dev/null +++ b/vendor/github.com/docker/containerd/supervisor/get_containers.go @@ -0,0 +1,28 @@ +package supervisor + +import "github.com/docker/containerd/runtime" + +type GetContainersTask struct { + baseTask + ID string + Containers []runtime.Container +} + +func (s *Supervisor) getContainers(t *GetContainersTask) error { + + if t.ID != "" { + ci, ok := s.containers[t.ID] + if !ok { + return ErrContainerNotFound + } + t.Containers = append(t.Containers, ci.container) + + return nil + } + + for _, ci := range s.containers { + t.Containers = append(t.Containers, ci.container) + } + + return nil +} diff --git a/vendor/github.com/docker/containerd/supervisor/machine.go b/vendor/github.com/docker/containerd/supervisor/machine.go new file mode 100644 index 00000000..7a21624b --- /dev/null +++ b/vendor/github.com/docker/containerd/supervisor/machine.go @@ -0,0 +1,25 @@ +// +build !solaris + +package supervisor + +import "github.com/cloudfoundry/gosigar" + +type Machine struct { + Cpus int + Memory int64 +} + +func CollectMachineInformation() (Machine, error) { + m := Machine{} + cpu := sigar.CpuList{} + if err := cpu.Get(); err != nil { + return m, err + } + m.Cpus = len(cpu.List) + mem := sigar.Mem{} + if err := mem.Get(); err != nil { + return m, err + } + m.Memory = int64(mem.Total / 1024 / 1024) + return m, nil +} diff --git a/vendor/github.com/docker/containerd/supervisor/machine_solaris.go b/vendor/github.com/docker/containerd/supervisor/machine_solaris.go new file mode 100644 index 00000000..c044705d --- /dev/null +++ b/vendor/github.com/docker/containerd/supervisor/machine_solaris.go @@ -0,0 +1,15 @@ +package supervisor + +import ( + "errors" +) + +type Machine struct { + Cpus int + Memory int64 +} + +func CollectMachineInformation() (Machine, error) { + m := Machine{} + return m, errors.New("supervisor CollectMachineInformation not implemented on Solaris") +} diff
--git a/vendor/github.com/docker/containerd/supervisor/metrics.go b/vendor/github.com/docker/containerd/supervisor/metrics.go new file mode 100644 index 00000000..2ba772a4 --- /dev/null +++ b/vendor/github.com/docker/containerd/supervisor/metrics.go @@ -0,0 +1,31 @@ +package supervisor + +import "github.com/rcrowley/go-metrics" + +var ( + ContainerCreateTimer = metrics.NewTimer() + ContainerDeleteTimer = metrics.NewTimer() + ContainerStartTimer = metrics.NewTimer() + ContainerStatsTimer = metrics.NewTimer() + ContainersCounter = metrics.NewCounter() + EventSubscriberCounter = metrics.NewCounter() + TasksCounter = metrics.NewCounter() + ExecProcessTimer = metrics.NewTimer() + ExitProcessTimer = metrics.NewTimer() + EpollFdCounter = metrics.NewCounter() +) + +func Metrics() map[string]interface{} { + return map[string]interface{}{ + "container-create-time": ContainerCreateTimer, + "container-delete-time": ContainerDeleteTimer, + "container-start-time": ContainerStartTimer, + "container-stats-time": ContainerStatsTimer, + "containers": ContainersCounter, + "event-subscribers": EventSubscriberCounter, + "tasks": TasksCounter, + "exec-process-time": ExecProcessTimer, + "exit-process-time": ExitProcessTimer, + "epoll-fds": EpollFdCounter, + } +} diff --git a/vendor/github.com/docker/containerd/supervisor/monitor_linux.go b/vendor/github.com/docker/containerd/supervisor/monitor_linux.go new file mode 100644 index 00000000..b1765853 --- /dev/null +++ b/vendor/github.com/docker/containerd/supervisor/monitor_linux.go @@ -0,0 +1,129 @@ +package supervisor + +import ( + "sync" + "syscall" + + "github.com/Sirupsen/logrus" + "github.com/docker/containerd/archutils" + "github.com/docker/containerd/runtime" +) + +func NewMonitor() (*Monitor, error) { + m := &Monitor{ + receivers: make(map[int]interface{}), + exits: make(chan runtime.Process, 1024), + ooms: make(chan string, 1024), + } + fd, err := archutils.EpollCreate1(0) + if err != nil { + return nil, err + } + m.epollFd = fd + go m.start() + return m, nil +} + +type Monitor struct { + m sync.Mutex + receivers map[int]interface{} + exits chan runtime.Process + ooms chan string + epollFd int +} + +func (m *Monitor) Exits() chan runtime.Process { + return m.exits +} + +func (m *Monitor) OOMs() chan string { + return m.ooms +} + +func (m *Monitor) Monitor(p runtime.Process) error { + m.m.Lock() + defer m.m.Unlock() + fd := p.ExitFD() + event := syscall.EpollEvent{ + Fd: int32(fd), + Events: syscall.EPOLLHUP, + } + if err := archutils.EpollCtl(m.epollFd, syscall.EPOLL_CTL_ADD, fd, &event); err != nil { + return err + } + EpollFdCounter.Inc(1) + m.receivers[fd] = p + return nil +} + +func (m *Monitor) MonitorOOM(c runtime.Container) error { + m.m.Lock() + defer m.m.Unlock() + o, err := c.OOM() + if err != nil { + return err + } + fd := o.FD() + event := syscall.EpollEvent{ + Fd: int32(fd), + Events: syscall.EPOLLHUP | syscall.EPOLLIN, + } + if err := archutils.EpollCtl(m.epollFd, syscall.EPOLL_CTL_ADD, fd, &event); err != nil { + return err + } + EpollFdCounter.Inc(1) + m.receivers[fd] = o + return nil +} + +func (m *Monitor) Close() error { + return syscall.Close(m.epollFd) +} + +func (m *Monitor) start() { + var events [128]syscall.EpollEvent + for { + n, err := archutils.EpollWait(m.epollFd, events[:], -1) + if err != nil { + if err == syscall.EINTR { + continue + } + logrus.WithField("error", err).Fatal("containerd: epoll wait") + } + // process events + for i := 0; i < n; i++ { + fd := int(events[i].Fd) + m.m.Lock() + r := m.receivers[fd] + switch t := 
r.(type) { + case runtime.Process: + if events[i].Events == syscall.EPOLLHUP { + delete(m.receivers, fd) + if err = syscall.EpollCtl(m.epollFd, syscall.EPOLL_CTL_DEL, fd, &syscall.EpollEvent{ + Events: syscall.EPOLLHUP, + Fd: int32(fd), + }); err != nil { + logrus.WithField("error", err).Error("containerd: epoll remove fd") + } + if err := t.Close(); err != nil { + logrus.WithField("error", err).Error("containerd: close process IO") + } + EpollFdCounter.Dec(1) + m.exits <- t + } + case runtime.OOM: + // always flush the event fd + t.Flush() + if t.Removed() { + delete(m.receivers, fd) + // epoll will remove the fd from its set after it has been closed + t.Close() + EpollFdCounter.Dec(1) + } else { + m.ooms <- t.ContainerID() + } + } + m.m.Unlock() + } + } +} diff --git a/vendor/github.com/docker/containerd/supervisor/monitor_solaris.go b/vendor/github.com/docker/containerd/supervisor/monitor_solaris.go new file mode 100644 index 00000000..6ad56ac8 --- /dev/null +++ b/vendor/github.com/docker/containerd/supervisor/monitor_solaris.go @@ -0,0 +1,38 @@ +package supervisor + +import ( + "errors" + + "github.com/docker/containerd/runtime" +) + +func NewMonitor() (*Monitor, error) { + return &Monitor{}, errors.New("Monitor NewMonitor() not implemented on Solaris") +} + +type Monitor struct { + ooms chan string +} + +func (m *Monitor) Exits() chan runtime.Process { + return nil +} + +func (m *Monitor) OOMs() chan string { + return m.ooms +} + +func (m *Monitor) Monitor(p runtime.Process) error { + return errors.New("Monitor Monitor() not implemented on Solaris") +} + +func (m *Monitor) MonitorOOM(c runtime.Container) error { + return errors.New("Monitor MonitorOOM() not implemented on Solaris") +} + +func (m *Monitor) Close() error { + return errors.New("Monitor Close() not implemented on Solaris") +} + +func (m *Monitor) start() { +} diff --git a/vendor/github.com/docker/containerd/supervisor/oom.go b/vendor/github.com/docker/containerd/supervisor/oom.go new file mode 100644 index 00000000..e204696e --- /dev/null +++ b/vendor/github.com/docker/containerd/supervisor/oom.go @@ -0,0 +1,22 @@ +package supervisor + +import ( + "time" + + "github.com/Sirupsen/logrus" +) + +type OOMTask struct { + baseTask + ID string +} + +func (s *Supervisor) oom(t *OOMTask) error { + logrus.WithField("id", t.ID).Debug("containerd: container oom") + s.notifySubscribers(Event{ + Timestamp: time.Now(), + ID: t.ID, + Type: StateOOM, + }) + return nil +} diff --git a/vendor/github.com/docker/containerd/supervisor/signal.go b/vendor/github.com/docker/containerd/supervisor/signal.go new file mode 100644 index 00000000..0705fc56 --- /dev/null +++ b/vendor/github.com/docker/containerd/supervisor/signal.go @@ -0,0 +1,27 @@ +package supervisor + +import "os" + +type SignalTask struct { + baseTask + ID string + PID string + Signal os.Signal +} + +func (s *Supervisor) signal(t *SignalTask) error { + i, ok := s.containers[t.ID] + if !ok { + return ErrContainerNotFound + } + processes, err := i.container.Processes() + if err != nil { + return err + } + for _, p := range processes { + if p.ID() == t.PID { + return p.Signal(t.Signal) + } + } + return ErrProcessNotFound +} diff --git a/vendor/github.com/docker/containerd/supervisor/sort.go b/vendor/github.com/docker/containerd/supervisor/sort.go new file mode 100644 index 00000000..2153b7e4 --- /dev/null +++ b/vendor/github.com/docker/containerd/supervisor/sort.go @@ -0,0 +1,27 @@ +package supervisor + +import ( + "sort" + + "github.com/docker/containerd/runtime" +) + +func 
sortProcesses(p []runtime.Process) { + sort.Sort(&processSorter{p}) +} + +type processSorter struct { + processes []runtime.Process +} + +func (s *processSorter) Len() int { + return len(s.processes) +} + +func (s *processSorter) Swap(i, j int) { + s.processes[i], s.processes[j] = s.processes[j], s.processes[i] +} + +func (s *processSorter) Less(i, j int) bool { + return s.processes[j].ID() == "init" +} diff --git a/vendor/github.com/docker/containerd/supervisor/stats.go b/vendor/github.com/docker/containerd/supervisor/stats.go new file mode 100644 index 00000000..00bf5f82 --- /dev/null +++ b/vendor/github.com/docker/containerd/supervisor/stats.go @@ -0,0 +1,33 @@ +package supervisor + +import ( + "time" + + "github.com/docker/containerd/runtime" +) + +type StatsTask struct { + baseTask + ID string + Stat chan *runtime.Stat +} + +func (s *Supervisor) stats(t *StatsTask) error { + start := time.Now() + i, ok := s.containers[t.ID] + if !ok { + return ErrContainerNotFound + } + // TODO: use workers for this + go func() { + s, err := i.container.Stats() + if err != nil { + t.ErrorCh() <- err + return + } + t.ErrorCh() <- nil + t.Stat <- s + ContainerStatsTimer.UpdateSince(start) + }() + return errDeferredResponse +} diff --git a/vendor/github.com/docker/containerd/supervisor/supervisor.go b/vendor/github.com/docker/containerd/supervisor/supervisor.go new file mode 100644 index 00000000..8cc253d4 --- /dev/null +++ b/vendor/github.com/docker/containerd/supervisor/supervisor.go @@ -0,0 +1,385 @@ +package supervisor + +import ( + "encoding/json" + "io" + "io/ioutil" + "os" + "path/filepath" + "sync" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/containerd/runtime" +) + +const ( + defaultBufferSize = 2048 // size of queue in eventloop +) + +// New returns an initialized Process supervisor. 
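To make the wiring concrete before the constructor below: a minimal sketch of how a daemon might construct and run the supervisor together with the workers from worker.go. This is illustrative only; the state directory, runtime and shim names, timeout, retain count, and worker count are assumptions, not values mandated by this package.

package main

import (
	"log"
	"sync"
	"time"

	"github.com/docker/containerd/supervisor"
)

func main() {
	// Illustrative arguments; a real daemon would take these from flags.
	sv, err := supervisor.New("/run/containerd", "runc", "containerd-shim", nil, 15*time.Second, 500)
	if err != nil {
		log.Fatal(err)
	}
	var wg sync.WaitGroup
	// Each worker drains startTasks; several run concurrently.
	for i := 0; i < 10; i++ {
		wg.Add(1)
		w := supervisor.NewWorker(sv, &wg)
		go w.Start()
	}
	// Start is non-blocking; it spawns the main task loop.
	if err := sv.Start(); err != nil {
		log.Fatal(err)
	}
	select {} // a real daemon would instead wait for a shutdown signal
}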
+func New(stateDir string, runtimeName, shimName string, runtimeArgs []string, timeout time.Duration, retainCount int) (*Supervisor, error) { + startTasks := make(chan *startTask, 10) + if err := os.MkdirAll(stateDir, 0755); err != nil { + return nil, err + } + machine, err := CollectMachineInformation() + if err != nil { + return nil, err + } + monitor, err := NewMonitor() + if err != nil { + return nil, err + } + s := &Supervisor{ + stateDir: stateDir, + containers: make(map[string]*containerInfo), + startTasks: startTasks, + machine: machine, + subscribers: make(map[chan Event]struct{}), + tasks: make(chan Task, defaultBufferSize), + monitor: monitor, + runtime: runtimeName, + runtimeArgs: runtimeArgs, + shim: shimName, + timeout: timeout, + } + if err := setupEventLog(s, retainCount); err != nil { + return nil, err + } + go s.exitHandler() + go s.oomHandler() + if err := s.restore(); err != nil { + return nil, err + } + return s, nil +} + +type containerInfo struct { + container runtime.Container +} + +func setupEventLog(s *Supervisor, retainCount int) error { + if err := readEventLog(s); err != nil { + return err + } + logrus.WithField("count", len(s.eventLog)).Debug("containerd: read past events") + events := s.Events(time.Time{}) + return eventLogger(s, filepath.Join(s.stateDir, "events.log"), events, retainCount) +} + +func eventLogger(s *Supervisor, path string, events chan Event, retainCount int) error { + f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_APPEND|os.O_TRUNC, 0755) + if err != nil { + return err + } + go func() { + var ( + count = len(s.eventLog) + enc = json.NewEncoder(f) + ) + for e := range events { + // if we have a specified retain count, make sure we truncate the event + // log if it grows past the specified number of events to keep. + if retainCount > 0 { + if count > retainCount { + logrus.Debug("truncating event log") + // close the log file + if f != nil { + f.Close() + } + slice := retainCount - 1 + l := len(s.eventLog) + if slice >= l { + slice = l + } + s.eventLock.Lock() + s.eventLog = s.eventLog[len(s.eventLog)-slice:] + s.eventLock.Unlock() + if f, err = os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_APPEND|os.O_TRUNC, 0755); err != nil { + logrus.WithField("error", err).Error("containerd: open event to journal") + continue + } + enc = json.NewEncoder(f) + count = 0 + for _, le := range s.eventLog { + if err := enc.Encode(le); err != nil { + logrus.WithField("error", err).Error("containerd: write event to journal") + } + } + } + } + s.eventLock.Lock() + s.eventLog = append(s.eventLog, e) + s.eventLock.Unlock() + count++ + if err := enc.Encode(e); err != nil { + logrus.WithField("error", err).Error("containerd: write event to journal") + } + } + }() + return nil +} + +func readEventLog(s *Supervisor) error { + f, err := os.Open(filepath.Join(s.stateDir, "events.log")) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + defer f.Close() + dec := json.NewDecoder(f) + for { + var e Event + if err := dec.Decode(&e); err != nil { + if err == io.EOF { + break + } + return err + } + s.eventLog = append(s.eventLog, e) + } + return nil +} + +type Supervisor struct { + // stateDir is the directory on the system to store container runtime state information.
+ stateDir string + // name of the OCI compatible runtime used to execute containers + runtime string + runtimeArgs []string + shim string + containers map[string]*containerInfo + startTasks chan *startTask + // we need a lock around the subscribers map only because additions and deletions from + // the map are via the API so we cannot really control the concurrency + subscriberLock sync.RWMutex + subscribers map[chan Event]struct{} + machine Machine + tasks chan Task + monitor *Monitor + eventLog []Event + eventLock sync.Mutex + timeout time.Duration +} + +// Stop closes all startTasks and sends a SIGTERM to each container's pid1 then waits for them to +// terminate. After it has handled all the SIGCHLD events it will close the signals chan +// and exit. Stop is a non-blocking call and will return after the containers have been signaled +func (s *Supervisor) Stop() { + // Close the startTasks channel so that no new containers get started + close(s.startTasks) +} + +// Close closes any open files in the supervisor but expects that Stop has been +// called so that no more containers are started. +func (s *Supervisor) Close() error { + return nil +} + +type Event struct { + ID string `json:"id"` + Type string `json:"type"` + Timestamp time.Time `json:"timestamp"` + PID string `json:"pid,omitempty"` + Status int `json:"status,omitempty"` +} + +// Events returns an event channel that external consumers can use to receive updates +// on container events +func (s *Supervisor) Events(from time.Time) chan Event { + s.subscriberLock.Lock() + defer s.subscriberLock.Unlock() + c := make(chan Event, defaultBufferSize) + EventSubscriberCounter.Inc(1) + s.subscribers[c] = struct{}{} + if !from.IsZero() { + // replay old events + s.eventLock.Lock() + past := s.eventLog[:] + s.eventLock.Unlock() + for _, e := range past { + if e.Timestamp.After(from) { + c <- e + } + } + // Notify the client that from now on the events are live + c <- Event{ + Type: StateLive, + Timestamp: time.Now(), + } + } + return c +} + +// Unsubscribe removes the provided channel from receiving any more events +func (s *Supervisor) Unsubscribe(sub chan Event) { + s.subscriberLock.Lock() + defer s.subscriberLock.Unlock() + delete(s.subscribers, sub) + close(sub) + EventSubscriberCounter.Dec(1) +} + +// notifySubscribers will send the provided event to the external subscribers +// of the events channel +func (s *Supervisor) notifySubscribers(e Event) { + s.subscriberLock.RLock() + defer s.subscriberLock.RUnlock() + for sub := range s.subscribers { + // do a non-blocking send for the channel + select { + case sub <- e: + default: + logrus.WithField("event", e.Type).Warn("containerd: event not sent to subscriber") + } + } +} + +// Start is a non-blocking call that runs the supervisor for monitoring container processes and +// executing new containers. +// +// This event loop is the only thing that is allowed to modify state of containers and processes; +// therefore it is safe to do operations in the handlers that modify state of the system or +// state of the Supervisor +func (s *Supervisor) Start() error { + logrus.WithFields(logrus.Fields{ + "stateDir": s.stateDir, + "runtime": s.runtime, + "runtimeArgs": s.runtimeArgs, + "memory": s.machine.Memory, + "cpus": s.machine.Cpus, + }).Debug("containerd: supervisor running") + go func() { + for i := range s.tasks { + s.handleTask(i) + } + }() + return nil +} + +// Machine returns the machine information for the host +// on which the supervisor is executing.
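The Events/Unsubscribe pair above is the supervisor's public notification API. A short sketch of a consumer, using only the signatures shown in this file (sv is assumed to be a *Supervisor built as in the earlier example):

package main

import (
	"log"
	"time"

	"github.com/docker/containerd/supervisor"
)

// watchEvents logs container events until the subscription is closed.
func watchEvents(sv *supervisor.Supervisor) {
	// A zero "from" time skips the replay of past events and the StateLive marker.
	events := sv.Events(time.Time{})
	go func() {
		for e := range events {
			log.Printf("event %s id=%s pid=%s status=%d", e.Type, e.ID, e.PID, e.Status)
		}
	}()
	time.Sleep(time.Minute) // illustrative; a real consumer runs until shutdown
	// Unsubscribe closes the channel, ending the range loop above.
	sv.Unsubscribe(events)
}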
+func (s *Supervisor) Machine() Machine { + return s.machine +} + +// SendTask sends the provided event to the supervisor's main event loop +func (s *Supervisor) SendTask(evt Task) { + TasksCounter.Inc(1) + s.tasks <- evt +} + +func (s *Supervisor) exitHandler() { + for p := range s.monitor.Exits() { + e := &ExitTask{ + Process: p, + } + s.SendTask(e) + } +} + +func (s *Supervisor) oomHandler() { + for id := range s.monitor.OOMs() { + e := &OOMTask{ + ID: id, + } + s.SendTask(e) + } +} + +func (s *Supervisor) monitorProcess(p runtime.Process) error { + return s.monitor.Monitor(p) +} + +func (s *Supervisor) restore() error { + dirs, err := ioutil.ReadDir(s.stateDir) + if err != nil { + return err + } + for _, d := range dirs { + if !d.IsDir() { + continue + } + id := d.Name() + container, err := runtime.Load(s.stateDir, id, s.timeout) + if err != nil { + return err + } + processes, err := container.Processes() + if err != nil { + return err + } + + ContainersCounter.Inc(1) + s.containers[id] = &containerInfo{ + container: container, + } + if err := s.monitor.MonitorOOM(container); err != nil && err != runtime.ErrContainerExited { + logrus.WithField("error", err).Error("containerd: notify OOM events") + } + logrus.WithField("id", id).Debug("containerd: container restored") + var exitedProcesses []runtime.Process + for _, p := range processes { + if p.State() == runtime.Running { + if err := s.monitorProcess(p); err != nil { + return err + } + } else { + exitedProcesses = append(exitedProcesses, p) + } + } + if len(exitedProcesses) > 0 { + // sort processes so that init is fired last because that is how the kernel sends the + // exit events + sortProcesses(exitedProcesses) + for _, p := range exitedProcesses { + e := &ExitTask{ + Process: p, + } + s.SendTask(e) + } + } + } + return nil +} + +func (s *Supervisor) handleTask(i Task) { + var err error + switch t := i.(type) { + case *AddProcessTask: + err = s.addProcess(t) + case *CreateCheckpointTask: + err = s.createCheckpoint(t) + case *DeleteCheckpointTask: + err = s.deleteCheckpoint(t) + case *StartTask: + err = s.start(t) + case *DeleteTask: + err = s.delete(t) + case *ExitTask: + err = s.exit(t) + case *ExecExitTask: + err = s.execExit(t) + case *GetContainersTask: + err = s.getContainers(t) + case *SignalTask: + err = s.signal(t) + case *StatsTask: + err = s.stats(t) + case *UpdateTask: + err = s.updateContainer(t) + case *UpdateProcessTask: + err = s.updateProcess(t) + case *OOMTask: + err = s.oom(t) + default: + err = ErrUnknownTask + } + if err != errDeferredResponse { + i.ErrorCh() <- err + close(i.ErrorCh()) + } +} diff --git a/vendor/github.com/docker/containerd/supervisor/task.go b/vendor/github.com/docker/containerd/supervisor/task.go new file mode 100644 index 00000000..4d6d1c50 --- /dev/null +++ b/vendor/github.com/docker/containerd/supervisor/task.go @@ -0,0 +1,33 @@ +package supervisor + +import ( + "sync" + + "github.com/docker/containerd/runtime" +) + +// StartResponse is the response containing a started container +type StartResponse struct { + Container runtime.Container +} + +// Task executes an action returning an error chan with either nil or +// the error from executing the task +type Task interface { + // ErrorCh returns a channel used to report an error from an async task + ErrorCh() chan error +} + +type baseTask struct { + errCh chan error + mu sync.Mutex +} + +func (t *baseTask) ErrorCh() chan error { + t.mu.Lock() + defer t.mu.Unlock() + if t.errCh == nil { + t.errCh = make(chan error, 1) + } + return t.errCh
+} diff --git a/vendor/github.com/docker/containerd/supervisor/types.go b/vendor/github.com/docker/containerd/supervisor/types.go new file mode 100644 index 00000000..2e36fce0 --- /dev/null +++ b/vendor/github.com/docker/containerd/supervisor/types.go @@ -0,0 +1,12 @@ +package supervisor + +// State constants used in Event types +const ( + StateStart = "start-container" + StatePause = "pause" + StateResume = "resume" + StateExit = "exit" + StateStartProcess = "start-process" + StateOOM = "oom" + StateLive = "live" +) diff --git a/vendor/github.com/docker/containerd/supervisor/update.go b/vendor/github.com/docker/containerd/supervisor/update.go new file mode 100644 index 00000000..fe2f4776 --- /dev/null +++ b/vendor/github.com/docker/containerd/supervisor/update.go @@ -0,0 +1,92 @@ +package supervisor + +import ( + "time" + + "github.com/docker/containerd/runtime" +) + +type UpdateTask struct { + baseTask + ID string + State runtime.State + Resources *runtime.Resource +} + +func (s *Supervisor) updateContainer(t *UpdateTask) error { + i, ok := s.containers[t.ID] + if !ok { + return ErrContainerNotFound + } + container := i.container + if t.State != "" { + switch t.State { + case runtime.Running: + if err := container.Resume(); err != nil { + return err + } + s.notifySubscribers(Event{ + ID: t.ID, + Type: StateResume, + Timestamp: time.Now(), + }) + case runtime.Paused: + if err := container.Pause(); err != nil { + return err + } + s.notifySubscribers(Event{ + ID: t.ID, + Type: StatePause, + Timestamp: time.Now(), + }) + default: + return ErrUnknownContainerStatus + } + return nil + } + if t.Resources != nil { + return container.UpdateResources(t.Resources) + } + return nil +} + +type UpdateProcessTask struct { + baseTask + ID string + PID string + CloseStdin bool + Width int + Height int +} + +func (s *Supervisor) updateProcess(t *UpdateProcessTask) error { + i, ok := s.containers[t.ID] + if !ok { + return ErrContainerNotFound + } + processes, err := i.container.Processes() + if err != nil { + return err + } + var process runtime.Process + for _, p := range processes { + if p.ID() == t.PID { + process = p + break + } + } + if process == nil { + return ErrProcessNotFound + } + if t.CloseStdin { + if err := process.CloseStdin(); err != nil { + return err + } + } + if t.Width > 0 || t.Height > 0 { + if err := process.Resize(t.Width, t.Height); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/docker/containerd/supervisor/worker.go b/vendor/github.com/docker/containerd/supervisor/worker.go new file mode 100644 index 00000000..a7570b80 --- /dev/null +++ b/vendor/github.com/docker/containerd/supervisor/worker.go @@ -0,0 +1,74 @@ +package supervisor + +import ( + "sync" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/containerd/runtime" +) + +type Worker interface { + Start() +} + +type startTask struct { + Container runtime.Container + CheckpointPath string + Stdin string + Stdout string + Stderr string + Err chan error + StartResponse chan StartResponse +} + +func NewWorker(s *Supervisor, wg *sync.WaitGroup) Worker { + return &worker{ + s: s, + wg: wg, + } +} + +type worker struct { + wg *sync.WaitGroup + s *Supervisor +} + +func (w *worker) Start() { + defer w.wg.Done() + for t := range w.s.startTasks { + started := time.Now() + process, err := t.Container.Start(t.CheckpointPath, runtime.NewStdio(t.Stdin, t.Stdout, t.Stderr)) + if err != nil { + logrus.WithFields(logrus.Fields{ + "error": err, + "id": t.Container.ID(), + }).Error("containerd: start 
container") + t.Err <- err + evt := &DeleteTask{ + ID: t.Container.ID(), + NoEvent: true, + } + w.s.SendTask(evt) + continue + } + if err := w.s.monitor.MonitorOOM(t.Container); err != nil && err != runtime.ErrContainerExited { + if process.State() != runtime.Stopped { + logrus.WithField("error", err).Error("containerd: notify OOM events") + } + } + if err := w.s.monitorProcess(process); err != nil { + logrus.WithField("error", err).Error("containerd: add process to monitor") + } + ContainerStartTimer.UpdateSince(started) + t.Err <- nil + t.StartResponse <- StartResponse{ + Container: t.Container, + } + w.s.notifySubscribers(Event{ + Timestamp: time.Now(), + ID: t.Container.ID(), + Type: StateStart, + }) + } +} diff --git a/vendor/github.com/docker/containerd/version.go b/vendor/github.com/docker/containerd/version.go new file mode 100644 index 00000000..d516b41e --- /dev/null +++ b/vendor/github.com/docker/containerd/version.go @@ -0,0 +1,11 @@ +package containerd + +import "fmt" + +const VersionMajor = 0 +const VersionMinor = 2 +const VersionPatch = 0 + +var Version = fmt.Sprintf("%d.%d.%d", VersionMajor, VersionMinor, VersionPatch) + +var GitCommit = "" diff --git a/vendor/github.com/docker/distribution/manifest/doc.go b/vendor/github.com/docker/distribution/manifest/doc.go new file mode 100644 index 00000000..88367b0a --- /dev/null +++ b/vendor/github.com/docker/distribution/manifest/doc.go @@ -0,0 +1 @@ +package manifest diff --git a/vendor/github.com/docker/distribution/manifest/manifestlist/manifestlist.go b/vendor/github.com/docker/distribution/manifest/manifestlist/manifestlist.go new file mode 100644 index 00000000..a2082ec0 --- /dev/null +++ b/vendor/github.com/docker/distribution/manifest/manifestlist/manifestlist.go @@ -0,0 +1,155 @@ +package manifestlist + +import ( + "encoding/json" + "errors" + "fmt" + + "github.com/docker/distribution" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest" +) + +// MediaTypeManifestList specifies the mediaType for manifest lists. +const MediaTypeManifestList = "application/vnd.docker.distribution.manifest.list.v2+json" + +// SchemaVersion provides a pre-initialized version structure for this +// packages version of the manifest. +var SchemaVersion = manifest.Versioned{ + SchemaVersion: 2, + MediaType: MediaTypeManifestList, +} + +func init() { + manifestListFunc := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) { + m := new(DeserializedManifestList) + err := m.UnmarshalJSON(b) + if err != nil { + return nil, distribution.Descriptor{}, err + } + + dgst := digest.FromBytes(b) + return m, distribution.Descriptor{Digest: dgst, Size: int64(len(b)), MediaType: MediaTypeManifestList}, err + } + err := distribution.RegisterManifestSchema(MediaTypeManifestList, manifestListFunc) + if err != nil { + panic(fmt.Sprintf("Unable to register manifest: %s", err)) + } +} + +// PlatformSpec specifies a platform where a particular image manifest is +// applicable. +type PlatformSpec struct { + // Architecture field specifies the CPU architecture, for example + // `amd64` or `ppc64`. + Architecture string `json:"architecture"` + + // OS specifies the operating system, for example `linux` or `windows`. + OS string `json:"os"` + + // OSVersion is an optional field specifying the operating system + // version, for example `10.0.10586`. 
+ OSVersion string `json:"os.version,omitempty"` + + // OSFeatures is an optional field specifying an array of strings, + // each listing a required OS feature (for example on Windows `win32k`). + OSFeatures []string `json:"os.features,omitempty"` + + // Variant is an optional field specifying a variant of the CPU, for + // example `ppc64le` to specify a little-endian version of a PowerPC CPU. + Variant string `json:"variant,omitempty"` + + // Features is an optional field specifying an array of strings, each + // listing a required CPU feature (for example `sse4` or `aes`). + Features []string `json:"features,omitempty"` +} + +// A ManifestDescriptor references a platform-specific manifest. +type ManifestDescriptor struct { + distribution.Descriptor + + // Platform specifies which platform the manifest pointed to by the + // descriptor runs on. + Platform PlatformSpec `json:"platform"` +} + +// ManifestList references manifests for various platforms. +type ManifestList struct { + manifest.Versioned + + // Manifests references the platform-specific image manifests. + Manifests []ManifestDescriptor `json:"manifests"` +} + +// References returns the distribution descriptors for the referenced image +// manifests. +func (m ManifestList) References() []distribution.Descriptor { + dependencies := make([]distribution.Descriptor, len(m.Manifests)) + for i := range m.Manifests { + dependencies[i] = m.Manifests[i].Descriptor + } + + return dependencies +} + +// DeserializedManifestList wraps ManifestList with a copy of the original +// JSON. +type DeserializedManifestList struct { + ManifestList + + // canonical is the canonical byte representation of the ManifestList. + canonical []byte +} + +// FromDescriptors takes a slice of descriptors, and returns a +// DeserializedManifestList which contains the resulting manifest list +// and its JSON representation. +func FromDescriptors(descriptors []ManifestDescriptor) (*DeserializedManifestList, error) { + m := ManifestList{ + Versioned: SchemaVersion, + } + + m.Manifests = make([]ManifestDescriptor, len(descriptors), len(descriptors)) + copy(m.Manifests, descriptors) + + deserialized := DeserializedManifestList{ + ManifestList: m, + } + + var err error + deserialized.canonical, err = json.MarshalIndent(&m, "", " ") + return &deserialized, err +} + +// UnmarshalJSON populates a new ManifestList struct from JSON data. +func (m *DeserializedManifestList) UnmarshalJSON(b []byte) error { + m.canonical = make([]byte, len(b), len(b)) + // store manifest list in canonical + copy(m.canonical, b) + + // Unmarshal canonical JSON into ManifestList object + var manifestList ManifestList + if err := json.Unmarshal(m.canonical, &manifestList); err != nil { + return err + } + + m.ManifestList = manifestList + + return nil +} + +// MarshalJSON returns the contents of canonical. If canonical is empty, +// marshals the inner contents. +func (m *DeserializedManifestList) MarshalJSON() ([]byte, error) { + if len(m.canonical) > 0 { + return m.canonical, nil + } + + return nil, errors.New("JSON representation not initialized in DeserializedManifestList") +} + +// Payload returns the raw content of the manifest list. The contents can be +// used to calculate the content identifier.
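Putting FromDescriptors together with the types above: a sketch that assembles a single-entry manifest list. The digest and size are placeholders, and the schema2 media type string is spelled out as a literal because this file does not import the schema2 package.

package main

import (
	"fmt"
	"log"

	"github.com/docker/distribution"
	"github.com/docker/distribution/digest"
	"github.com/docker/distribution/manifest/manifestlist"
)

func main() {
	descriptors := []manifestlist.ManifestDescriptor{
		{
			Descriptor: distribution.Descriptor{
				MediaType: "application/vnd.docker.distribution.manifest.v2+json",
				Size:      7143,                                                                                // placeholder
				Digest:    digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"), // placeholder
			},
			Platform: manifestlist.PlatformSpec{Architecture: "amd64", OS: "linux"},
		},
	}
	ml, err := manifestlist.FromDescriptors(descriptors)
	if err != nil {
		log.Fatal(err)
	}
	mediaType, payload, _ := ml.Payload()
	fmt.Println(mediaType, len(payload))
}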
+func (m DeserializedManifestList) Payload() (string, []byte, error) { + return m.MediaType, m.canonical, nil +} diff --git a/vendor/github.com/docker/distribution/manifest/schema1/config_builder.go b/vendor/github.com/docker/distribution/manifest/schema1/config_builder.go new file mode 100644 index 00000000..b3d1e554 --- /dev/null +++ b/vendor/github.com/docker/distribution/manifest/schema1/config_builder.go @@ -0,0 +1,281 @@ +package schema1 + +import ( + "crypto/sha512" + "encoding/json" + "errors" + "fmt" + "time" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/reference" + "github.com/docker/libtrust" + + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest" +) + +type diffID digest.Digest + +// gzippedEmptyTar is a gzip-compressed version of an empty tar file +// (1024 NULL bytes) +var gzippedEmptyTar = []byte{ + 31, 139, 8, 0, 0, 9, 110, 136, 0, 255, 98, 24, 5, 163, 96, 20, 140, 88, + 0, 8, 0, 0, 255, 255, 46, 175, 181, 239, 0, 4, 0, 0, +} + +// digestSHA256GzippedEmptyTar is the canonical sha256 digest of +// gzippedEmptyTar +const digestSHA256GzippedEmptyTar = digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4") + +// configManifestBuilder is a type for constructing manifests from an image +// configuration and generic descriptors. +type configManifestBuilder struct { + // bs is a BlobService used to create empty layer tars in the + // blob store if necessary. + bs distribution.BlobService + // pk is the libtrust private key used to sign the final manifest. + pk libtrust.PrivateKey + // configJSON is configuration supplied when the ManifestBuilder was + // created. + configJSON []byte + // ref contains the name and optional tag provided to NewConfigManifestBuilder. + ref reference.Named + // descriptors is the set of descriptors referencing the layers. + descriptors []distribution.Descriptor + // emptyTarDigest is set to a valid digest if an empty tar has been + // put in the blob store; otherwise it is empty. + emptyTarDigest digest.Digest +} + +// NewConfigManifestBuilder is used to build new manifests for the current +// schema version from an image configuration and a set of descriptors. +// It takes a BlobService so that it can add an empty tar to the blob store +// if the resulting manifest needs empty layers. 
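A sketch of how the builder declared below is typically driven, assuming the caller already holds a blob service, an image configuration blob, and layer descriptors. reference.ParseNamed, context.Background, and libtrust.GenerateECP256PrivateKey are helpers from this era's packages that are assumed here rather than shown in this patch.

package main

import (
	"io/ioutil"
	"log"

	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
	"github.com/docker/distribution/manifest/schema1"
	"github.com/docker/distribution/reference"
	"github.com/docker/libtrust"
)

func main() {
	configJSON, err := ioutil.ReadFile("config.json") // illustrative input
	if err != nil {
		log.Fatal(err)
	}
	pk, err := libtrust.GenerateECP256PrivateKey()
	if err != nil {
		log.Fatal(err)
	}
	ref, err := reference.ParseNamed("example.com/repo:latest") // illustrative name
	if err != nil {
		log.Fatal(err)
	}
	var bs distribution.BlobService // assumed: provided by the registry; needed if empty layers must be pushed
	mb := schema1.NewConfigManifestBuilder(bs, pk, ref, configJSON)
	// One AppendReference call per layer descriptor, base layer first,
	// matching the diff_ids in the image configuration.
	m, err := mb.Build(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	_ = m
}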
+func NewConfigManifestBuilder(bs distribution.BlobService, pk libtrust.PrivateKey, ref reference.Named, configJSON []byte) distribution.ManifestBuilder { + return &configManifestBuilder{ + bs: bs, + pk: pk, + configJSON: configJSON, + ref: ref, + } +} + +// Build produces a final manifest from the given references +func (mb *configManifestBuilder) Build(ctx context.Context) (m distribution.Manifest, err error) { + type imageRootFS struct { + Type string `json:"type"` + DiffIDs []diffID `json:"diff_ids,omitempty"` + BaseLayer string `json:"base_layer,omitempty"` + } + + type imageHistory struct { + Created time.Time `json:"created"` + Author string `json:"author,omitempty"` + CreatedBy string `json:"created_by,omitempty"` + Comment string `json:"comment,omitempty"` + EmptyLayer bool `json:"empty_layer,omitempty"` + } + + type imageConfig struct { + RootFS *imageRootFS `json:"rootfs,omitempty"` + History []imageHistory `json:"history,omitempty"` + Architecture string `json:"architecture,omitempty"` + } + + var img imageConfig + + if err := json.Unmarshal(mb.configJSON, &img); err != nil { + return nil, err + } + + if len(img.History) == 0 { + return nil, errors.New("empty history when trying to create schema1 manifest") + } + + if len(img.RootFS.DiffIDs) != len(mb.descriptors) { + return nil, errors.New("number of descriptors and number of layers in rootfs must match") + } + + // Generate IDs for each layer + // For non-top-level layers, create fake V1Compatibility strings that + // fit the format and don't collide with anything else, but don't + // result in runnable images on their own. + type v1Compatibility struct { + ID string `json:"id"` + Parent string `json:"parent,omitempty"` + Comment string `json:"comment,omitempty"` + Created time.Time `json:"created"` + ContainerConfig struct { + Cmd []string + } `json:"container_config,omitempty"` + ThrowAway bool `json:"throwaway,omitempty"` + } + + fsLayerList := make([]FSLayer, len(img.History)) + history := make([]History, len(img.History)) + + parent := "" + layerCounter := 0 + for i, h := range img.History[:len(img.History)-1] { + var blobsum digest.Digest + if h.EmptyLayer { + if blobsum, err = mb.emptyTar(ctx); err != nil { + return nil, err + } + } else { + if len(img.RootFS.DiffIDs) <= layerCounter { + return nil, errors.New("too many non-empty layers in History section") + } + blobsum = mb.descriptors[layerCounter].Digest + layerCounter++ + } + + v1ID := digest.FromBytes([]byte(blobsum.Hex() + " " + parent)).Hex() + + if i == 0 && img.RootFS.BaseLayer != "" { + // windows-only baselayer setup + baseID := sha512.Sum384([]byte(img.RootFS.BaseLayer)) + parent = fmt.Sprintf("%x", baseID[:32]) + } + + v1Compatibility := v1Compatibility{ + ID: v1ID, + Parent: parent, + Comment: h.Comment, + Created: h.Created, + } + v1Compatibility.ContainerConfig.Cmd = []string{img.History[i].CreatedBy} + if h.EmptyLayer { + v1Compatibility.ThrowAway = true + } + jsonBytes, err := json.Marshal(&v1Compatibility) + if err != nil { + return nil, err + } + + reversedIndex := len(img.History) - i - 1 + history[reversedIndex].V1Compatibility = string(jsonBytes) + fsLayerList[reversedIndex] = FSLayer{BlobSum: blobsum} + + parent = v1ID + } + + latestHistory := img.History[len(img.History)-1] + + var blobsum digest.Digest + if latestHistory.EmptyLayer { + if blobsum, err = mb.emptyTar(ctx); err != nil { + return nil, err + } + } else { + if len(img.RootFS.DiffIDs) <= layerCounter { + return nil, errors.New("too many non-empty layers in History section") + } + 
blobsum = mb.descriptors[layerCounter].Digest + } + + fsLayerList[0] = FSLayer{BlobSum: blobsum} + dgst := digest.FromBytes([]byte(blobsum.Hex() + " " + parent + " " + string(mb.configJSON))) + + // Top-level v1compatibility string should be a modified version of the + // image config. + transformedConfig, err := MakeV1ConfigFromConfig(mb.configJSON, dgst.Hex(), parent, latestHistory.EmptyLayer) + if err != nil { + return nil, err + } + + history[0].V1Compatibility = string(transformedConfig) + + tag := "" + if tagged, isTagged := mb.ref.(reference.Tagged); isTagged { + tag = tagged.Tag() + } + + mfst := Manifest{ + Versioned: manifest.Versioned{ + SchemaVersion: 1, + }, + Name: mb.ref.Name(), + Tag: tag, + Architecture: img.Architecture, + FSLayers: fsLayerList, + History: history, + } + + return Sign(&mfst, mb.pk) +} + +// emptyTar pushes a compressed empty tar to the blob store if one doesn't +// already exist, and returns its blobsum. +func (mb *configManifestBuilder) emptyTar(ctx context.Context) (digest.Digest, error) { + if mb.emptyTarDigest != "" { + // Already put an empty tar + return mb.emptyTarDigest, nil + } + + descriptor, err := mb.bs.Stat(ctx, digestSHA256GzippedEmptyTar) + switch err { + case nil: + mb.emptyTarDigest = descriptor.Digest + return descriptor.Digest, nil + case distribution.ErrBlobUnknown: + // nop + default: + return "", err + } + + // Add gzipped empty tar to the blob store + descriptor, err = mb.bs.Put(ctx, "", gzippedEmptyTar) + if err != nil { + return "", err + } + + mb.emptyTarDigest = descriptor.Digest + + return descriptor.Digest, nil +} + +// AppendReference adds a reference to the current ManifestBuilder +func (mb *configManifestBuilder) AppendReference(d distribution.Describable) error { + // todo: verification here? + mb.descriptors = append(mb.descriptors, d.Descriptor()) + return nil +} + +// References returns the current references added to this builder +func (mb *configManifestBuilder) References() []distribution.Descriptor { + return mb.descriptors +} + +// MakeV1ConfigFromConfig creates a legacy V1 image config from image config JSON +func MakeV1ConfigFromConfig(configJSON []byte, v1ID, parentV1ID string, throwaway bool) ([]byte, error) { + // Top-level v1compatibility string should be a modified version of the + // image config. + var configAsMap map[string]*json.RawMessage + if err := json.Unmarshal(configJSON, &configAsMap); err != nil { + return nil, err + } + + // Delete fields that didn't exist in old manifest + delete(configAsMap, "rootfs") + delete(configAsMap, "history") + configAsMap["id"] = rawJSON(v1ID) + if parentV1ID != "" { + configAsMap["parent"] = rawJSON(parentV1ID) + } + if throwaway { + configAsMap["throwaway"] = rawJSON(true) + } + + return json.Marshal(configAsMap) +} + +func rawJSON(value interface{}) *json.RawMessage { + jsonval, err := json.Marshal(value) + if err != nil { + return nil + } + return (*json.RawMessage)(&jsonval) +} diff --git a/vendor/github.com/docker/distribution/manifest/schema1/manifest.go b/vendor/github.com/docker/distribution/manifest/schema1/manifest.go new file mode 100644 index 00000000..bff47bde --- /dev/null +++ b/vendor/github.com/docker/distribution/manifest/schema1/manifest.go @@ -0,0 +1,184 @@ +package schema1 + +import ( + "encoding/json" + "fmt" + + "github.com/docker/distribution" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest" + "github.com/docker/libtrust" +) + +const ( + // MediaTypeManifest specifies the mediaType for the current version.
Note + // that for schema version 1, the media type is optionally "application/json". + MediaTypeManifest = "application/vnd.docker.distribution.manifest.v1+json" + // MediaTypeSignedManifest specifies the media type for the current SignedManifest version + MediaTypeSignedManifest = "application/vnd.docker.distribution.manifest.v1+prettyjws" + // MediaTypeManifestLayer specifies the media type for manifest layers + MediaTypeManifestLayer = "application/vnd.docker.container.image.rootfs.diff+x-gtar" +) + +var ( + // SchemaVersion provides a pre-initialized version structure for this + // package's version of the manifest. + SchemaVersion = manifest.Versioned{ + SchemaVersion: 1, + } +) + +func init() { + schema1Func := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) { + sm := new(SignedManifest) + err := sm.UnmarshalJSON(b) + if err != nil { + return nil, distribution.Descriptor{}, err + } + + desc := distribution.Descriptor{ + Digest: digest.FromBytes(sm.Canonical), + Size: int64(len(sm.Canonical)), + MediaType: MediaTypeSignedManifest, + } + return sm, desc, err + } + err := distribution.RegisterManifestSchema(MediaTypeSignedManifest, schema1Func) + if err != nil { + panic(fmt.Sprintf("Unable to register manifest: %s", err)) + } + err = distribution.RegisterManifestSchema("", schema1Func) + if err != nil { + panic(fmt.Sprintf("Unable to register manifest: %s", err)) + } + err = distribution.RegisterManifestSchema("application/json", schema1Func) + if err != nil { + panic(fmt.Sprintf("Unable to register manifest: %s", err)) + } +} + +// FSLayer is a container struct for BlobSums defined in an image manifest +type FSLayer struct { + // BlobSum is the tarsum of the referenced filesystem image layer + BlobSum digest.Digest `json:"blobSum"` +} + +// History stores unstructured v1 compatibility information +type History struct { + // V1Compatibility is the raw v1 compatibility information + V1Compatibility string `json:"v1Compatibility"` +} + +// Manifest provides the base accessible fields for working with V2 image +// format in the registry. +type Manifest struct { + manifest.Versioned + + // Name is the name of the image's repository + Name string `json:"name"` + + // Tag is the tag of the image specified by this manifest + Tag string `json:"tag"` + + // Architecture is the host architecture on which this image is intended to + // run + Architecture string `json:"architecture"` + + // FSLayers is a list of filesystem layer blobSums contained in this image + FSLayers []FSLayer `json:"fsLayers"` + + // History is a list of unstructured historical data for v1 compatibility + History []History `json:"history"` +} + +// SignedManifest provides an envelope for a signed image manifest, including +// the format sensitive raw bytes. +type SignedManifest struct { + Manifest + + // Canonical is the canonical byte representation of the ImageManifest, + // without any attached signatures. The manifest byte + // representation cannot change or it will have to be re-signed. + Canonical []byte `json:"-"` + + // all contains the byte representation of the Manifest including signatures + // and is returned by Payload() + all []byte +} + +// UnmarshalJSON populates a new SignedManifest struct from JSON data.
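Because the init function above registers unmarshal functions for the signed, empty, and plain-JSON media types, generic callers can decode a raw manifest without referencing this package's concrete types. A sketch, assuming the distribution package's UnmarshalManifest helper (the consumer of these registrations in this version of the library):

package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"github.com/docker/distribution"
	"github.com/docker/distribution/manifest/schema1"
)

func main() {
	raw, err := ioutil.ReadFile("manifest.json") // illustrative input
	if err != nil {
		log.Fatal(err)
	}
	// An empty or "application/json" media type would resolve to schema1 as well,
	// per the extra registrations above.
	m, desc, err := distribution.UnmarshalManifest(schema1.MediaTypeSignedManifest, raw)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(desc.Digest, len(m.References()))
}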
+func (sm *SignedManifest) UnmarshalJSON(b []byte) error { + sm.all = make([]byte, len(b), len(b)) + // store manifest and signatures in all + copy(sm.all, b) + + jsig, err := libtrust.ParsePrettySignature(b, "signatures") + if err != nil { + return err + } + + // Resolve the payload in the manifest. + bytes, err := jsig.Payload() + if err != nil { + return err + } + + // sm.Canonical stores the canonical manifest JSON + sm.Canonical = make([]byte, len(bytes), len(bytes)) + copy(sm.Canonical, bytes) + + // Unmarshal canonical JSON into Manifest object + var manifest Manifest + if err := json.Unmarshal(sm.Canonical, &manifest); err != nil { + return err + } + + sm.Manifest = manifest + + return nil +} + +// References returnes the descriptors of this manifests references +func (sm SignedManifest) References() []distribution.Descriptor { + dependencies := make([]distribution.Descriptor, len(sm.FSLayers)) + for i, fsLayer := range sm.FSLayers { + dependencies[i] = distribution.Descriptor{ + MediaType: "application/vnd.docker.container.image.rootfs.diff+x-gtar", + Digest: fsLayer.BlobSum, + } + } + + return dependencies + +} + +// MarshalJSON returns the contents of raw. If Raw is nil, marshals the inner +// contents. Applications requiring a marshaled signed manifest should simply +// use Raw directly, since the the content produced by json.Marshal will be +// compacted and will fail signature checks. +func (sm *SignedManifest) MarshalJSON() ([]byte, error) { + if len(sm.all) > 0 { + return sm.all, nil + } + + // If the raw data is not available, just dump the inner content. + return json.Marshal(&sm.Manifest) +} + +// Payload returns the signed content of the signed manifest. +func (sm SignedManifest) Payload() (string, []byte, error) { + return MediaTypeSignedManifest, sm.all, nil +} + +// Signatures returns the signatures as provided by +// (*libtrust.JSONSignature).Signatures. The byte slices are opaque jws +// signatures. +func (sm *SignedManifest) Signatures() ([][]byte, error) { + jsig, err := libtrust.ParsePrettySignature(sm.all, "signatures") + if err != nil { + return nil, err + } + + // Resolve the payload in the manifest. + return jsig.Signatures() +} diff --git a/vendor/github.com/docker/distribution/manifest/schema1/reference_builder.go b/vendor/github.com/docker/distribution/manifest/schema1/reference_builder.go new file mode 100644 index 00000000..fc1045f9 --- /dev/null +++ b/vendor/github.com/docker/distribution/manifest/schema1/reference_builder.go @@ -0,0 +1,98 @@ +package schema1 + +import ( + "fmt" + + "errors" + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest" + "github.com/docker/distribution/reference" + "github.com/docker/libtrust" +) + +// referenceManifestBuilder is a type for constructing manifests from schema1 +// dependencies. +type referenceManifestBuilder struct { + Manifest + pk libtrust.PrivateKey +} + +// NewReferenceManifestBuilder is used to build new manifests for the current +// schema version using schema1 dependencies. 
+func NewReferenceManifestBuilder(pk libtrust.PrivateKey, ref reference.Named, architecture string) distribution.ManifestBuilder { + tag := "" + if tagged, isTagged := ref.(reference.Tagged); isTagged { + tag = tagged.Tag() + } + + return &referenceManifestBuilder{ + Manifest: Manifest{ + Versioned: manifest.Versioned{ + SchemaVersion: 1, + }, + Name: ref.Name(), + Tag: tag, + Architecture: architecture, + }, + pk: pk, + } +} + +func (mb *referenceManifestBuilder) Build(ctx context.Context) (distribution.Manifest, error) { + m := mb.Manifest + if len(m.FSLayers) == 0 { + return nil, errors.New("cannot build manifest with zero layers or history") + } + + m.FSLayers = make([]FSLayer, len(mb.Manifest.FSLayers)) + m.History = make([]History, len(mb.Manifest.History)) + copy(m.FSLayers, mb.Manifest.FSLayers) + copy(m.History, mb.Manifest.History) + + return Sign(&m, mb.pk) +} + +// AppendReference adds a reference to the current ManifestBuilder +func (mb *referenceManifestBuilder) AppendReference(d distribution.Describable) error { + r, ok := d.(Reference) + if !ok { + return fmt.Errorf("Unable to add non-reference type to v1 builder") + } + + // Entries need to be prepended + mb.Manifest.FSLayers = append([]FSLayer{{BlobSum: r.Digest}}, mb.Manifest.FSLayers...) + mb.Manifest.History = append([]History{r.History}, mb.Manifest.History...) + return nil + +} + +// References returns the current references added to this builder +func (mb *referenceManifestBuilder) References() []distribution.Descriptor { + refs := make([]distribution.Descriptor, len(mb.Manifest.FSLayers)) + for i := range mb.Manifest.FSLayers { + layerDigest := mb.Manifest.FSLayers[i].BlobSum + history := mb.Manifest.History[i] + ref := Reference{layerDigest, 0, history} + refs[i] = ref.Descriptor() + } + return refs +} + +// Reference describes a manifest v2, schema version 1 dependency. +// An FSLayer associated with a history entry. +type Reference struct { + Digest digest.Digest + Size int64 // if we know it, set it for the descriptor. + History History +} + +// Descriptor describes a reference +func (r Reference) Descriptor() distribution.Descriptor { + return distribution.Descriptor{ + MediaType: MediaTypeManifestLayer, + Digest: r.Digest, + Size: r.Size, + } +} diff --git a/vendor/github.com/docker/distribution/manifest/schema1/sign.go b/vendor/github.com/docker/distribution/manifest/schema1/sign.go new file mode 100644 index 00000000..c862dd81 --- /dev/null +++ b/vendor/github.com/docker/distribution/manifest/schema1/sign.go @@ -0,0 +1,68 @@ +package schema1 + +import ( + "crypto/x509" + "encoding/json" + + "github.com/docker/libtrust" +) + +// Sign signs the manifest with the provided private key, returning a +// SignedManifest. This typically won't be used within the registry, except +// for testing. +func Sign(m *Manifest, pk libtrust.PrivateKey) (*SignedManifest, error) { + p, err := json.MarshalIndent(m, "", " ") + if err != nil { + return nil, err + } + + js, err := libtrust.NewJSONSignature(p) + if err != nil { + return nil, err + } + + if err := js.Sign(pk); err != nil { + return nil, err + } + + pretty, err := js.PrettySignature("signatures") + if err != nil { + return nil, err + } + + return &SignedManifest{ + Manifest: *m, + all: pretty, + Canonical: p, + }, nil +} + +// SignWithChain signs the manifest with the given private key and x509 chain. +// The public key of the first element in the chain must be the public key +// corresponding with the sign key. 
+func SignWithChain(m *Manifest, key libtrust.PrivateKey, chain []*x509.Certificate) (*SignedManifest, error) { + p, err := json.MarshalIndent(m, "", " ") + if err != nil { + return nil, err + } + + js, err := libtrust.NewJSONSignature(p) + if err != nil { + return nil, err + } + + if err := js.SignWithChain(key, chain); err != nil { + return nil, err + } + + pretty, err := js.PrettySignature("signatures") + if err != nil { + return nil, err + } + + return &SignedManifest{ + Manifest: *m, + all: pretty, + Canonical: p, + }, nil +} diff --git a/vendor/github.com/docker/distribution/manifest/schema1/verify.go b/vendor/github.com/docker/distribution/manifest/schema1/verify.go new file mode 100644 index 00000000..fa8daa56 --- /dev/null +++ b/vendor/github.com/docker/distribution/manifest/schema1/verify.go @@ -0,0 +1,32 @@ +package schema1 + +import ( + "crypto/x509" + + "github.com/Sirupsen/logrus" + "github.com/docker/libtrust" +) + +// Verify verifies the signature of the signed manifest returning the public +// keys used during signing. +func Verify(sm *SignedManifest) ([]libtrust.PublicKey, error) { + js, err := libtrust.ParsePrettySignature(sm.all, "signatures") + if err != nil { + logrus.WithField("err", err).Debugf("(*SignedManifest).Verify") + return nil, err + } + + return js.Verify() +} + +// VerifyChains verifies the signature of the signed manifest against the +// certificate pool returning the list of verified chains. Signatures without +// an x509 chain are not checked. +func VerifyChains(sm *SignedManifest, ca *x509.CertPool) ([][]*x509.Certificate, error) { + js, err := libtrust.ParsePrettySignature(sm.all, "signatures") + if err != nil { + return nil, err + } + + return js.VerifyChains(ca) +} diff --git a/vendor/github.com/docker/distribution/manifest/schema2/builder.go b/vendor/github.com/docker/distribution/manifest/schema2/builder.go new file mode 100644 index 00000000..44b94eaa --- /dev/null +++ b/vendor/github.com/docker/distribution/manifest/schema2/builder.go @@ -0,0 +1,77 @@ +package schema2 + +import ( + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" +) + +// builder is a type for constructing manifests. +type builder struct { + // bs is a BlobService used to publish the configuration blob. + bs distribution.BlobService + + // configJSON references + configJSON []byte + + // layers is a list of layer descriptors that gets built by successive + // calls to AppendReference. + layers []distribution.Descriptor +} + +// NewManifestBuilder is used to build new manifests for the current schema +// version. It takes a BlobService so it can publish the configuration blob +// as part of the Build process. +func NewManifestBuilder(bs distribution.BlobService, configJSON []byte) distribution.ManifestBuilder { + mb := &builder{ + bs: bs, + configJSON: make([]byte, len(configJSON)), + } + copy(mb.configJSON, configJSON) + + return mb +} + +// Build produces a final manifest from the given references. 
+func (mb *builder) Build(ctx context.Context) (distribution.Manifest, error) { + m := Manifest{ + Versioned: SchemaVersion, + Layers: make([]distribution.Descriptor, len(mb.layers)), + } + copy(m.Layers, mb.layers) + + configDigest := digest.FromBytes(mb.configJSON) + + var err error + m.Config, err = mb.bs.Stat(ctx, configDigest) + switch err { + case nil: + return FromStruct(m) + case distribution.ErrBlobUnknown: + // nop + default: + return nil, err + } + + // Add config to the blob store + m.Config, err = mb.bs.Put(ctx, MediaTypeConfig, mb.configJSON) + // Override MediaType, since Put always replaces the specified media + // type with application/octet-stream in the descriptor it returns. + m.Config.MediaType = MediaTypeConfig + if err != nil { + return nil, err + } + + return FromStruct(m) +} + +// AppendReference adds a reference to the current ManifestBuilder. +func (mb *builder) AppendReference(d distribution.Describable) error { + mb.layers = append(mb.layers, d.Descriptor()) + return nil +} + +// References returns the current references added to this builder. +func (mb *builder) References() []distribution.Descriptor { + return mb.layers +} diff --git a/vendor/github.com/docker/distribution/manifest/schema2/manifest.go b/vendor/github.com/docker/distribution/manifest/schema2/manifest.go new file mode 100644 index 00000000..8d378e99 --- /dev/null +++ b/vendor/github.com/docker/distribution/manifest/schema2/manifest.go @@ -0,0 +1,125 @@ +package schema2 + +import ( + "encoding/json" + "errors" + "fmt" + + "github.com/docker/distribution" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest" +) + +const ( + // MediaTypeManifest specifies the mediaType for the current version. + MediaTypeManifest = "application/vnd.docker.distribution.manifest.v2+json" + + // MediaTypeConfig specifies the mediaType for the image configuration. + MediaTypeConfig = "application/vnd.docker.container.image.v1+json" + + // MediaTypeLayer is the mediaType used for layers referenced by the + // manifest. + MediaTypeLayer = "application/vnd.docker.image.rootfs.diff.tar.gzip" +) + +var ( + // SchemaVersion provides a pre-initialized version structure for this + // packages version of the manifest. + SchemaVersion = manifest.Versioned{ + SchemaVersion: 2, + MediaType: MediaTypeManifest, + } +) + +func init() { + schema2Func := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) { + m := new(DeserializedManifest) + err := m.UnmarshalJSON(b) + if err != nil { + return nil, distribution.Descriptor{}, err + } + + dgst := digest.FromBytes(b) + return m, distribution.Descriptor{Digest: dgst, Size: int64(len(b)), MediaType: MediaTypeManifest}, err + } + err := distribution.RegisterManifestSchema(MediaTypeManifest, schema2Func) + if err != nil { + panic(fmt.Sprintf("Unable to register manifest: %s", err)) + } +} + +// Manifest defines a schema2 manifest. +type Manifest struct { + manifest.Versioned + + // Config references the image configuration as a blob. + Config distribution.Descriptor `json:"config"` + + // Layers lists descriptors for the layers referenced by the + // configuration. + Layers []distribution.Descriptor `json:"layers"` +} + +// References returnes the descriptors of this manifests references. +func (m Manifest) References() []distribution.Descriptor { + return m.Layers + +} + +// Target returns the target of this signed manifest. 
+func (m Manifest) Target() distribution.Descriptor {
+	return m.Config
+}
+
+// DeserializedManifest wraps Manifest with a copy of the original JSON.
+// It satisfies the distribution.Manifest interface.
+type DeserializedManifest struct {
+	Manifest
+
+	// canonical is the canonical byte representation of the Manifest.
+	canonical []byte
+}
+
+// FromStruct takes a Manifest structure, marshals it to JSON, and returns a
+// DeserializedManifest which contains the manifest and its JSON representation.
+func FromStruct(m Manifest) (*DeserializedManifest, error) {
+	var deserialized DeserializedManifest
+	deserialized.Manifest = m
+
+	var err error
+	deserialized.canonical, err = json.MarshalIndent(&m, "", " ")
+	return &deserialized, err
+}
+
+// UnmarshalJSON populates a new Manifest struct from JSON data.
+func (m *DeserializedManifest) UnmarshalJSON(b []byte) error {
+	m.canonical = make([]byte, len(b), len(b))
+	// store manifest in canonical
+	copy(m.canonical, b)
+
+	// Unmarshal canonical JSON into Manifest object
+	var manifest Manifest
+	if err := json.Unmarshal(m.canonical, &manifest); err != nil {
+		return err
+	}
+
+	m.Manifest = manifest
+
+	return nil
+}
+
+// MarshalJSON returns the contents of canonical. If canonical is empty,
+// it returns an error.
+func (m *DeserializedManifest) MarshalJSON() ([]byte, error) {
+	if len(m.canonical) > 0 {
+		return m.canonical, nil
+	}
+
+	return nil, errors.New("JSON representation not initialized in DeserializedManifest")
+}
+
+// Payload returns the raw content of the manifest. The contents can be used to
+// calculate the content identifier.
+func (m DeserializedManifest) Payload() (string, []byte, error) {
+	return m.MediaType, m.canonical, nil
+}
diff --git a/vendor/github.com/docker/distribution/manifest/versioned.go b/vendor/github.com/docker/distribution/manifest/versioned.go
new file mode 100644
index 00000000..c57398bd
--- /dev/null
+++ b/vendor/github.com/docker/distribution/manifest/versioned.go
@@ -0,0 +1,12 @@
+package manifest
+
+// Versioned provides a struct with the manifest schemaVersion and mediaType.
+// Incoming content with unknown schema version can be decoded against this
+// struct to check the version.
+type Versioned struct {
+	// SchemaVersion is the image manifest schema that this image follows
+	SchemaVersion int `json:"schemaVersion"`
+
+	// MediaType is the media type of this schema.
+	MediaType string `json:"mediaType,omitempty"`
+}
diff --git a/vendor/github.com/docker/docker/api/README.md b/vendor/github.com/docker/docker/api/README.md
new file mode 100644
index 00000000..453f61a1
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/README.md
@@ -0,0 +1,5 @@
+This directory contains code pertaining to the Docker API:
+
+ - Used by the docker client when communicating with the docker daemon
+
+ - Used by third party tools wishing to interface with the docker daemon
diff --git a/vendor/github.com/docker/docker/api/client/attach.go b/vendor/github.com/docker/docker/api/client/attach.go
new file mode 100644
index 00000000..e89644d6
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/client/attach.go
@@ -0,0 +1,109 @@
+package client
+
+import (
+	"fmt"
+	"io"
+
+	"golang.org/x/net/context"
+
+	"github.com/Sirupsen/logrus"
+	Cli "github.com/docker/docker/cli"
+	flag "github.com/docker/docker/pkg/mflag"
+	"github.com/docker/docker/pkg/signal"
+	"github.com/docker/engine-api/types"
+)
+
+// CmdAttach attaches to a running container.
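+//
+// For example (the container name is illustrative):
+//
+//	docker attach --no-stdin mycontainer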
+// +// Usage: docker attach [OPTIONS] CONTAINER +func (cli *DockerCli) CmdAttach(args ...string) error { + cmd := Cli.Subcmd("attach", []string{"CONTAINER"}, Cli.DockerCommands["attach"].Description, true) + noStdin := cmd.Bool([]string{"-no-stdin"}, false, "Do not attach STDIN") + proxy := cmd.Bool([]string{"-sig-proxy"}, true, "Proxy all received signals to the process") + detachKeys := cmd.String([]string{"-detach-keys"}, "", "Override the key sequence for detaching a container") + + cmd.Require(flag.Exact, 1) + + cmd.ParseFlags(args, true) + + c, err := cli.client.ContainerInspect(context.Background(), cmd.Arg(0)) + if err != nil { + return err + } + + if !c.State.Running { + return fmt.Errorf("You cannot attach to a stopped container, start it first") + } + + if c.State.Paused { + return fmt.Errorf("You cannot attach to a paused container, unpause it first") + } + + if err := cli.CheckTtyInput(!*noStdin, c.Config.Tty); err != nil { + return err + } + + if *detachKeys != "" { + cli.configFile.DetachKeys = *detachKeys + } + + options := types.ContainerAttachOptions{ + ContainerID: cmd.Arg(0), + Stream: true, + Stdin: !*noStdin && c.Config.OpenStdin, + Stdout: true, + Stderr: true, + DetachKeys: cli.configFile.DetachKeys, + } + + var in io.ReadCloser + if options.Stdin { + in = cli.in + } + + if *proxy && !c.Config.Tty { + sigc := cli.forwardAllSignals(options.ContainerID) + defer signal.StopCatch(sigc) + } + + resp, err := cli.client.ContainerAttach(context.Background(), options) + if err != nil { + return err + } + defer resp.Close() + if in != nil && c.Config.Tty { + if err := cli.setRawTerminal(); err != nil { + return err + } + defer cli.restoreTerminal(in) + } + + if c.Config.Tty && cli.isTerminalOut { + height, width := cli.getTtySize() + // To handle the case where a user repeatedly attaches/detaches without resizing their + // terminal, the only way to get the shell prompt to display for attaches 2+ is to artificially + // resize it, then go back to normal. Without this, every attach after the first will + // require the user to manually resize or hit enter. + cli.resizeTtyTo(cmd.Arg(0), height+1, width+1, false) + + // After the above resizing occurs, the call to monitorTtySize below will handle resetting back + // to the actual size. 
+ if err := cli.monitorTtySize(cmd.Arg(0), false); err != nil { + logrus.Debugf("Error monitoring TTY size: %s", err) + } + } + + if err := cli.holdHijackedConnection(c.Config.Tty, in, cli.out, cli.err, resp); err != nil { + return err + } + + _, status, err := getExitCode(cli, options.ContainerID) + if err != nil { + return err + } + if status != 0 { + return Cli.StatusError{StatusCode: status} + } + + return nil +} diff --git a/vendor/github.com/docker/docker/api/client/build.go b/vendor/github.com/docker/docker/api/client/build.go new file mode 100644 index 00000000..cdba3fa5 --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/build.go @@ -0,0 +1,321 @@ +package client + +import ( + "archive/tar" + "bytes" + "fmt" + "io" + "os" + "path/filepath" + "regexp" + "runtime" + + "golang.org/x/net/context" + + "github.com/docker/docker/builder" + "github.com/docker/docker/builder/dockerignore" + Cli "github.com/docker/docker/cli" + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/fileutils" + "github.com/docker/docker/pkg/jsonmessage" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/streamformatter" + "github.com/docker/docker/pkg/urlutil" + "github.com/docker/docker/reference" + runconfigopts "github.com/docker/docker/runconfig/opts" + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/container" + "github.com/docker/go-units" +) + +type translatorFunc func(reference.NamedTagged) (reference.Canonical, error) + +// CmdBuild builds a new image from the source code at a given path. +// +// If '-' is provided instead of a path or URL, Docker will build an image from either a Dockerfile or tar archive read from STDIN. 
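+//
+// For example (the tag is illustrative):
+//
+//	docker build -t example/app .
+//	tar czf - . | docker build -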
+// +// Usage: docker build [OPTIONS] PATH | URL | - +func (cli *DockerCli) CmdBuild(args ...string) error { + cmd := Cli.Subcmd("build", []string{"PATH | URL | -"}, Cli.DockerCommands["build"].Description, true) + flTags := opts.NewListOpts(validateTag) + cmd.Var(&flTags, []string{"t", "-tag"}, "Name and optionally a tag in the 'name:tag' format") + suppressOutput := cmd.Bool([]string{"q", "-quiet"}, false, "Suppress the build output and print image ID on success") + noCache := cmd.Bool([]string{"-no-cache"}, false, "Do not use cache when building the image") + rm := cmd.Bool([]string{"-rm"}, true, "Remove intermediate containers after a successful build") + forceRm := cmd.Bool([]string{"-force-rm"}, false, "Always remove intermediate containers") + pull := cmd.Bool([]string{"-pull"}, false, "Always attempt to pull a newer version of the image") + dockerfileName := cmd.String([]string{"f", "-file"}, "", "Name of the Dockerfile (Default is 'PATH/Dockerfile')") + flMemoryString := cmd.String([]string{"m", "-memory"}, "", "Memory limit") + flMemorySwap := cmd.String([]string{"-memory-swap"}, "", "Swap limit equal to memory plus swap: '-1' to enable unlimited swap") + flShmSize := cmd.String([]string{"-shm-size"}, "", "Size of /dev/shm, default value is 64MB") + flCPUShares := cmd.Int64([]string{"#c", "-cpu-shares"}, 0, "CPU shares (relative weight)") + flCPUPeriod := cmd.Int64([]string{"-cpu-period"}, 0, "Limit the CPU CFS (Completely Fair Scheduler) period") + flCPUQuota := cmd.Int64([]string{"-cpu-quota"}, 0, "Limit the CPU CFS (Completely Fair Scheduler) quota") + flCPUSetCpus := cmd.String([]string{"-cpuset-cpus"}, "", "CPUs in which to allow execution (0-3, 0,1)") + flCPUSetMems := cmd.String([]string{"-cpuset-mems"}, "", "MEMs in which to allow execution (0-3, 0,1)") + flCgroupParent := cmd.String([]string{"-cgroup-parent"}, "", "Optional parent cgroup for the container") + flBuildArg := opts.NewListOpts(runconfigopts.ValidateEnv) + cmd.Var(&flBuildArg, []string{"-build-arg"}, "Set build-time variables") + isolation := cmd.String([]string{"-isolation"}, "", "Container isolation technology") + + flLabels := opts.NewListOpts(nil) + cmd.Var(&flLabels, []string{"-label"}, "Set metadata for an image") + + ulimits := make(map[string]*units.Ulimit) + flUlimits := runconfigopts.NewUlimitOpt(&ulimits) + cmd.Var(flUlimits, []string{"-ulimit"}, "Ulimit options") + + cmd.Require(flag.Exact, 1) + + cmd.ParseFlags(args, true) + + var ( + ctx io.ReadCloser + err error + ) + + specifiedContext := cmd.Arg(0) + + var ( + contextDir string + tempDir string + relDockerfile string + progBuff io.Writer + buildBuff io.Writer + ) + + progBuff = cli.out + buildBuff = cli.out + if *suppressOutput { + progBuff = bytes.NewBuffer(nil) + buildBuff = bytes.NewBuffer(nil) + } + + switch { + case specifiedContext == "-": + ctx, relDockerfile, err = builder.GetContextFromReader(cli.in, *dockerfileName) + case urlutil.IsGitURL(specifiedContext): + tempDir, relDockerfile, err = builder.GetContextFromGitURL(specifiedContext, *dockerfileName) + case urlutil.IsURL(specifiedContext): + ctx, relDockerfile, err = builder.GetContextFromURL(progBuff, specifiedContext, *dockerfileName) + default: + contextDir, relDockerfile, err = builder.GetContextFromLocalDir(specifiedContext, *dockerfileName) + } + + if err != nil { + if *suppressOutput && urlutil.IsURL(specifiedContext) { + fmt.Fprintln(cli.err, progBuff) + } + return fmt.Errorf("unable to prepare context: %s", err) + } + + if tempDir != "" { + defer os.RemoveAll(tempDir) + 
contextDir = tempDir + } + + if ctx == nil { + // And canonicalize dockerfile name to a platform-independent one + relDockerfile, err = archive.CanonicalTarNameForPath(relDockerfile) + if err != nil { + return fmt.Errorf("cannot canonicalize dockerfile path %s: %v", relDockerfile, err) + } + + f, err := os.Open(filepath.Join(contextDir, ".dockerignore")) + if err != nil && !os.IsNotExist(err) { + return err + } + + var excludes []string + if err == nil { + excludes, err = dockerignore.ReadAll(f) + if err != nil { + return err + } + } + + if err := builder.ValidateContextDirectory(contextDir, excludes); err != nil { + return fmt.Errorf("Error checking context: '%s'.", err) + } + + // If .dockerignore mentions .dockerignore or the Dockerfile + // then make sure we send both files over to the daemon + // because Dockerfile is, obviously, needed no matter what, and + // .dockerignore is needed to know if either one needs to be + // removed. The daemon will remove them for us, if needed, after it + // parses the Dockerfile. Ignore errors here, as they will have been + // caught by validateContextDirectory above. + var includes = []string{"."} + keepThem1, _ := fileutils.Matches(".dockerignore", excludes) + keepThem2, _ := fileutils.Matches(relDockerfile, excludes) + if keepThem1 || keepThem2 { + includes = append(includes, ".dockerignore", relDockerfile) + } + + ctx, err = archive.TarWithOptions(contextDir, &archive.TarOptions{ + Compression: archive.Uncompressed, + ExcludePatterns: excludes, + IncludeFiles: includes, + }) + if err != nil { + return err + } + } + + // Setup an upload progress bar + progressOutput := streamformatter.NewStreamFormatter().NewProgressOutput(progBuff, true) + + var body io.Reader = progress.NewProgressReader(ctx, progressOutput, 0, "", "Sending build context to Docker daemon") + + var memory int64 + if *flMemoryString != "" { + parsedMemory, err := units.RAMInBytes(*flMemoryString) + if err != nil { + return err + } + memory = parsedMemory + } + + var memorySwap int64 + if *flMemorySwap != "" { + if *flMemorySwap == "-1" { + memorySwap = -1 + } else { + parsedMemorySwap, err := units.RAMInBytes(*flMemorySwap) + if err != nil { + return err + } + memorySwap = parsedMemorySwap + } + } + + var shmSize int64 + if *flShmSize != "" { + shmSize, err = units.RAMInBytes(*flShmSize) + if err != nil { + return err + } + } + + options := types.ImageBuildOptions{ + Context: body, + Memory: memory, + MemorySwap: memorySwap, + Tags: flTags.GetAll(), + SuppressOutput: *suppressOutput, + NoCache: *noCache, + Remove: *rm, + ForceRemove: *forceRm, + PullParent: *pull, + Isolation: container.Isolation(*isolation), + CPUSetCPUs: *flCPUSetCpus, + CPUSetMems: *flCPUSetMems, + CPUShares: *flCPUShares, + CPUQuota: *flCPUQuota, + CPUPeriod: *flCPUPeriod, + CgroupParent: *flCgroupParent, + Dockerfile: relDockerfile, + ShmSize: shmSize, + Ulimits: flUlimits.GetList(), + BuildArgs: runconfigopts.ConvertKVStringsToMap(flBuildArg.GetAll()), + AuthConfigs: cli.retrieveAuthConfigs(), + Labels: runconfigopts.ConvertKVStringsToMap(flLabels.GetAll()), + } + + response, err := cli.client.ImageBuild(context.Background(), options) + if err != nil { + return err + } + defer response.Body.Close() + + err = jsonmessage.DisplayJSONMessagesStream(response.Body, buildBuff, cli.outFd, cli.isTerminalOut, nil) + if err != nil { + if jerr, ok := err.(*jsonmessage.JSONError); ok { + // If no error code is set, default to 1 + if jerr.Code == 0 { + jerr.Code = 1 + } + if *suppressOutput { + fmt.Fprintf(cli.err, 
"%s%s", progBuff, buildBuff) + } + return Cli.StatusError{Status: jerr.Message, StatusCode: jerr.Code} + } + } + + // Windows: show error message about modified file permissions if the + // daemon isn't running Windows. + if response.OSType != "windows" && runtime.GOOS == "windows" { + fmt.Fprintln(cli.err, `SECURITY WARNING: You are building a Docker image from Windows against a non-Windows Docker host. All files and directories added to build context will have '-rwxr-xr-x' permissions. It is recommended to double check and reset permissions for sensitive files and directories.`) + } + + // Everything worked so if -q was provided the output from the daemon + // should be just the image ID and we'll print that to stdout. + if *suppressOutput { + fmt.Fprintf(cli.out, "%s", buildBuff) + } + + return nil +} + +// validateTag checks if the given image name can be resolved. +func validateTag(rawRepo string) (string, error) { + _, err := reference.ParseNamed(rawRepo) + if err != nil { + return "", err + } + + return rawRepo, nil +} + +var dockerfileFromLinePattern = regexp.MustCompile(`(?i)^[\s]*FROM[ \f\r\t\v]+(?P[^ \f\r\t\v\n#]+)`) + +// resolvedTag records the repository, tag, and resolved digest reference +// from a Dockerfile rewrite. +type resolvedTag struct { + digestRef reference.Canonical + tagRef reference.NamedTagged +} + +// replaceDockerfileTarWrapper wraps the given input tar archive stream and +// replaces the entry with the given Dockerfile name with the contents of the +// new Dockerfile. Returns a new tar archive stream with the replaced +// Dockerfile. +func replaceDockerfileTarWrapper(inputTarStream io.ReadCloser, dockerfileName string, translator translatorFunc, resolvedTags *[]*resolvedTag) io.ReadCloser { + pipeReader, pipeWriter := io.Pipe() + go func() { + tarReader := tar.NewReader(inputTarStream) + tarWriter := tar.NewWriter(pipeWriter) + + defer inputTarStream.Close() + + for { + hdr, err := tarReader.Next() + if err == io.EOF { + // Signals end of archive. + tarWriter.Close() + pipeWriter.Close() + return + } + if err != nil { + pipeWriter.CloseWithError(err) + return + } + + var content io.Reader = tarReader + + if err := tarWriter.WriteHeader(hdr); err != nil { + pipeWriter.CloseWithError(err) + return + } + + if _, err := io.Copy(tarWriter, content); err != nil { + pipeWriter.CloseWithError(err) + return + } + } + }() + + return pipeReader +} diff --git a/vendor/github.com/docker/docker/api/client/cli.go b/vendor/github.com/docker/docker/api/client/cli.go new file mode 100644 index 00000000..6c673da4 --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/cli.go @@ -0,0 +1,215 @@ +package client + +import ( + "errors" + "fmt" + "io" + "net/http" + "os" + "runtime" + + "github.com/docker/docker/api" + "github.com/docker/docker/cli" + "github.com/docker/docker/cliconfig" + "github.com/docker/docker/cliconfig/credentials" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/term" + "github.com/docker/engine-api/client" + "github.com/docker/go-connections/sockets" + "github.com/docker/go-connections/tlsconfig" +) + +// DockerCli represents the docker command line client. +// Instances of the client can be returned from NewDockerCli. +type DockerCli struct { + // initializing closure + init func() error + + // configFile has the client configuration file + configFile *cliconfig.ConfigFile + // in holds the input stream and closer (io.ReadCloser) for the client. 
+ in io.ReadCloser + // out holds the output stream (io.Writer) for the client. + out io.Writer + // err holds the error stream (io.Writer) for the client. + err io.Writer + // keyFile holds the key file as a string. + keyFile string + // inFd holds the file descriptor of the client's STDIN (if valid). + inFd uintptr + // outFd holds file descriptor of the client's STDOUT (if valid). + outFd uintptr + // isTerminalIn indicates whether the client's STDIN is a TTY + isTerminalIn bool + // isTerminalOut indicates whether the client's STDOUT is a TTY + isTerminalOut bool + // client is the http client that performs all API operations + client client.APIClient + // state holds the terminal state + state *term.State +} + +// Initialize calls the init function that will setup the configuration for the client +// such as the TLS, tcp and other parameters used to run the client. +func (cli *DockerCli) Initialize() error { + if cli.init == nil { + return nil + } + return cli.init() +} + +// CheckTtyInput checks if we are trying to attach to a container tty +// from a non-tty client input stream, and if so, returns an error. +func (cli *DockerCli) CheckTtyInput(attachStdin, ttyMode bool) error { + // In order to attach to a container tty, input stream for the client must + // be a tty itself: redirecting or piping the client standard input is + // incompatible with `docker run -t`, `docker exec -t` or `docker attach`. + if ttyMode && attachStdin && !cli.isTerminalIn { + return errors.New("cannot enable tty mode on non tty input") + } + return nil +} + +// PsFormat returns the format string specified in the configuration. +// String contains columns and format specification, for example {{ID}}\t{{Name}}. +func (cli *DockerCli) PsFormat() string { + return cli.configFile.PsFormat +} + +// ImagesFormat returns the format string specified in the configuration. +// String contains columns and format specification, for example {{ID}}\t{{Name}}. +func (cli *DockerCli) ImagesFormat() string { + return cli.configFile.ImagesFormat +} + +func (cli *DockerCli) setRawTerminal() error { + if cli.isTerminalIn && os.Getenv("NORAW") == "" { + state, err := term.SetRawTerminal(cli.inFd) + if err != nil { + return err + } + cli.state = state + } + return nil +} + +func (cli *DockerCli) restoreTerminal(in io.Closer) error { + if cli.state != nil { + term.RestoreTerminal(cli.inFd, cli.state) + } + // WARNING: DO NOT REMOVE THE OS CHECK !!! + // For some reason this Close call blocks on darwin.. + // As the client exists right after, simply discard the close + // until we find a better solution. + if in != nil && runtime.GOOS != "darwin" { + return in.Close() + } + return nil +} + +// NewDockerCli returns a DockerCli instance with IO output and error streams set by in, out and err. +// The key file, protocol (i.e. unix) and address are passed in as strings, along with the tls.Config. If the tls.Config +// is set the client scheme will be set to https. +// The client will be given a 32-second timeout (see https://github.com/docker/docker/pull/8035). 
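+//
+// A construction sketch (streams and flags are assumed to come from the
+// caller):
+//
+//	cli := NewDockerCli(os.Stdin, os.Stdout, os.Stderr, clientFlags)
+//	if err := cli.Initialize(); err != nil {
+//		// handle configuration errors
+//	}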
+func NewDockerCli(in io.ReadCloser, out, err io.Writer, clientFlags *cli.ClientFlags) *DockerCli { + cli := &DockerCli{ + in: in, + out: out, + err: err, + keyFile: clientFlags.Common.TrustKey, + } + + cli.init = func() error { + clientFlags.PostParse() + configFile, e := cliconfig.Load(cliconfig.ConfigDir()) + if e != nil { + fmt.Fprintf(cli.err, "WARNING: Error loading config file:%v\n", e) + } + if !configFile.ContainsAuth() { + credentials.DetectDefaultStore(configFile) + } + cli.configFile = configFile + + host, err := getServerHost(clientFlags.Common.Hosts, clientFlags.Common.TLSOptions) + if err != nil { + return err + } + + customHeaders := cli.configFile.HTTPHeaders + if customHeaders == nil { + customHeaders = map[string]string{} + } + customHeaders["User-Agent"] = clientUserAgent() + + verStr := api.DefaultVersion.String() + if tmpStr := os.Getenv("DOCKER_API_VERSION"); tmpStr != "" { + verStr = tmpStr + } + + httpClient, err := newHTTPClient(host, clientFlags.Common.TLSOptions) + if err != nil { + return err + } + + client, err := client.NewClient(host, verStr, httpClient, customHeaders) + if err != nil { + return err + } + cli.client = client + + if cli.in != nil { + cli.inFd, cli.isTerminalIn = term.GetFdInfo(cli.in) + } + if cli.out != nil { + cli.outFd, cli.isTerminalOut = term.GetFdInfo(cli.out) + } + + return nil + } + + return cli +} + +func getServerHost(hosts []string, tlsOptions *tlsconfig.Options) (host string, err error) { + switch len(hosts) { + case 0: + host = os.Getenv("DOCKER_HOST") + case 1: + host = hosts[0] + default: + return "", errors.New("Please specify only one -H") + } + + host, err = opts.ParseHost(tlsOptions != nil, host) + return +} + +func newHTTPClient(host string, tlsOptions *tlsconfig.Options) (*http.Client, error) { + if tlsOptions == nil { + // let the api client configure the default transport. + return nil, nil + } + + config, err := tlsconfig.Client(*tlsOptions) + if err != nil { + return nil, err + } + tr := &http.Transport{ + TLSClientConfig: config, + } + proto, addr, _, err := client.ParseHost(host) + if err != nil { + return nil, err + } + + sockets.ConfigureTransport(tr, proto, addr) + + return &http.Client{ + Transport: tr, + }, nil +} + +func clientUserAgent() string { + return "Docker-Client/" + dockerversion.Version + " (" + runtime.GOOS + ")" +} diff --git a/vendor/github.com/docker/docker/api/client/client.go b/vendor/github.com/docker/docker/api/client/client.go new file mode 100644 index 00000000..4cfce5f6 --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/client.go @@ -0,0 +1,5 @@ +// Package client provides a command-line interface for Docker. +// +// Run "docker help SUBCOMMAND" or "docker SUBCOMMAND --help" to see more information on any Docker subcommand, including the full list of options supported for the subcommand. +// See https://docs.docker.com/installation/ for instructions on installing Docker. 
+package client diff --git a/vendor/github.com/docker/docker/api/client/commit.go b/vendor/github.com/docker/docker/api/client/commit.go new file mode 100644 index 00000000..6bec4297 --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/commit.go @@ -0,0 +1,85 @@ +package client + +import ( + "encoding/json" + "errors" + "fmt" + + "golang.org/x/net/context" + + Cli "github.com/docker/docker/cli" + "github.com/docker/docker/opts" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/reference" + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/container" +) + +// CmdCommit creates a new image from a container's changes. +// +// Usage: docker commit [OPTIONS] CONTAINER [REPOSITORY[:TAG]] +func (cli *DockerCli) CmdCommit(args ...string) error { + cmd := Cli.Subcmd("commit", []string{"CONTAINER [REPOSITORY[:TAG]]"}, Cli.DockerCommands["commit"].Description, true) + flPause := cmd.Bool([]string{"p", "-pause"}, true, "Pause container during commit") + flComment := cmd.String([]string{"m", "-message"}, "", "Commit message") + flAuthor := cmd.String([]string{"a", "-author"}, "", "Author (e.g., \"John Hannibal Smith \")") + flChanges := opts.NewListOpts(nil) + cmd.Var(&flChanges, []string{"c", "-change"}, "Apply Dockerfile instruction to the created image") + // FIXME: --run is deprecated, it will be replaced with inline Dockerfile commands. + flConfig := cmd.String([]string{"#-run"}, "", "This option is deprecated and will be removed in a future version in favor of inline Dockerfile-compatible commands") + cmd.Require(flag.Max, 2) + cmd.Require(flag.Min, 1) + + cmd.ParseFlags(args, true) + + var ( + name = cmd.Arg(0) + repositoryAndTag = cmd.Arg(1) + repositoryName string + tag string + ) + + //Check if the given image name can be resolved + if repositoryAndTag != "" { + ref, err := reference.ParseNamed(repositoryAndTag) + if err != nil { + return err + } + + repositoryName = ref.Name() + + switch x := ref.(type) { + case reference.Canonical: + return errors.New("cannot commit to digest reference") + case reference.NamedTagged: + tag = x.Tag() + } + } + + var config *container.Config + if *flConfig != "" { + config = &container.Config{} + if err := json.Unmarshal([]byte(*flConfig), config); err != nil { + return err + } + } + + options := types.ContainerCommitOptions{ + ContainerID: name, + RepositoryName: repositoryName, + Tag: tag, + Comment: *flComment, + Author: *flAuthor, + Changes: flChanges.GetAll(), + Pause: *flPause, + Config: config, + } + + response, err := cli.client.ContainerCommit(context.Background(), options) + if err != nil { + return err + } + + fmt.Fprintln(cli.out, response.ID) + return nil +} diff --git a/vendor/github.com/docker/docker/api/client/cp.go b/vendor/github.com/docker/docker/api/client/cp.go new file mode 100644 index 00000000..61005602 --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/cp.go @@ -0,0 +1,298 @@ +package client + +import ( + "fmt" + "io" + "os" + "path/filepath" + "strings" + + "golang.org/x/net/context" + + Cli "github.com/docker/docker/cli" + "github.com/docker/docker/pkg/archive" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/system" + "github.com/docker/engine-api/types" +) + +type copyDirection int + +const ( + fromContainer copyDirection = (1 << iota) + toContainer + acrossContainers = fromContainer | toContainer +) + +type cpConfig struct { + followLink bool +} + +// CmdCp copies files/folders to or from a path in a container. 
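+//
+// For example (container and paths are illustrative):
+//
+//	docker cp web:/var/log/app.log ./app.log
+//	docker cp ./config.yml web:/etc/app/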
+// +// When copying from a container, if DEST_PATH is '-' the data is written as a +// tar archive file to STDOUT. +// +// When copying to a container, if SRC_PATH is '-' the data is read as a tar +// archive file from STDIN, and the destination CONTAINER:DEST_PATH, must specify +// a directory. +// +// Usage: +// docker cp CONTAINER:SRC_PATH DEST_PATH|- +// docker cp SRC_PATH|- CONTAINER:DEST_PATH +func (cli *DockerCli) CmdCp(args ...string) error { + cmd := Cli.Subcmd( + "cp", + []string{"CONTAINER:SRC_PATH DEST_PATH|-", "SRC_PATH|- CONTAINER:DEST_PATH"}, + strings.Join([]string{ + Cli.DockerCommands["cp"].Description, + "\nUse '-' as the source to read a tar archive from stdin\n", + "and extract it to a directory destination in a container.\n", + "Use '-' as the destination to stream a tar archive of a\n", + "container source to stdout.", + }, ""), + true, + ) + + followLink := cmd.Bool([]string{"L", "-follow-link"}, false, "Always follow symbol link in SRC_PATH") + + cmd.Require(flag.Exact, 2) + cmd.ParseFlags(args, true) + + if cmd.Arg(0) == "" { + return fmt.Errorf("source can not be empty") + } + if cmd.Arg(1) == "" { + return fmt.Errorf("destination can not be empty") + } + + srcContainer, srcPath := splitCpArg(cmd.Arg(0)) + dstContainer, dstPath := splitCpArg(cmd.Arg(1)) + + var direction copyDirection + if srcContainer != "" { + direction |= fromContainer + } + if dstContainer != "" { + direction |= toContainer + } + + cpParam := &cpConfig{ + followLink: *followLink, + } + + switch direction { + case fromContainer: + return cli.copyFromContainer(srcContainer, srcPath, dstPath, cpParam) + case toContainer: + return cli.copyToContainer(srcPath, dstContainer, dstPath, cpParam) + case acrossContainers: + // Copying between containers isn't supported. + return fmt.Errorf("copying between containers is not supported") + default: + // User didn't specify any container. + return fmt.Errorf("must specify at least one container source") + } +} + +// We use `:` as a delimiter between CONTAINER and PATH, but `:` could also be +// in a valid LOCALPATH, like `file:name.txt`. We can resolve this ambiguity by +// requiring a LOCALPATH with a `:` to be made explicit with a relative or +// absolute path: +// `/path/to/file:name.txt` or `./file:name.txt` +// +// This is apparently how `scp` handles this as well: +// http://www.cyberciti.biz/faq/rsync-scp-file-name-with-colon-punctuation-in-it/ +// +// We can't simply check for a filepath separator because container names may +// have a separator, e.g., "host0/cname1" if container is in a Docker cluster, +// so we have to check for a `/` or `.` prefix. Also, in the case of a Windows +// client, a `:` could be part of an absolute Windows path, in which case it +// is immediately proceeded by a backslash. +func splitCpArg(arg string) (container, path string) { + if system.IsAbs(arg) { + // Explicit local absolute path, e.g., `C:\foo` or `/foo`. + return "", arg + } + + parts := strings.SplitN(arg, ":", 2) + + if len(parts) == 1 || strings.HasPrefix(parts[0], ".") { + // Either there's no `:` in the arg + // OR it's an explicit local relative path like `./file:name.txt`. 
return "", arg
+	}
+
+	return parts[0], parts[1]
+}
+
+func (cli *DockerCli) statContainerPath(containerName, path string) (types.ContainerPathStat, error) {
+	return cli.client.ContainerStatPath(context.Background(), containerName, path)
+}
+
+func resolveLocalPath(localPath string) (absPath string, err error) {
+	if absPath, err = filepath.Abs(localPath); err != nil {
+		return
+	}
+
+	return archive.PreserveTrailingDotOrSeparator(absPath, localPath), nil
+}
+
+func (cli *DockerCli) copyFromContainer(srcContainer, srcPath, dstPath string, cpParam *cpConfig) (err error) {
+	if dstPath != "-" {
+		// Get an absolute destination path.
+		dstPath, err = resolveLocalPath(dstPath)
+		if err != nil {
+			return err
+		}
+	}
+
+	// If the client requests to follow symbolic links, we must first resolve
+	// the link to decide which target file to copy.
+	var rebaseName string
+	if cpParam.followLink {
+		srcStat, err := cli.statContainerPath(srcContainer, srcPath)
+
+		// If the source is a symbolic link, we should follow it.
+		if err == nil && srcStat.Mode&os.ModeSymlink != 0 {
+			linkTarget := srcStat.LinkTarget
+			if !system.IsAbs(linkTarget) {
+				// Join with the parent directory.
+				srcParent, _ := archive.SplitPathDirEntry(srcPath)
+				linkTarget = filepath.Join(srcParent, linkTarget)
+			}
+
+			linkTarget, rebaseName = archive.GetRebaseName(srcPath, linkTarget)
+			srcPath = linkTarget
+		}
+
+	}
+
+	content, stat, err := cli.client.CopyFromContainer(context.Background(), srcContainer, srcPath)
+	if err != nil {
+		return err
+	}
+	defer content.Close()
+
+	if dstPath == "-" {
+		// Send the response to STDOUT.
+		_, err = io.Copy(os.Stdout, content)
+
+		return err
+	}
+
+	// Prepare source copy info.
+	srcInfo := archive.CopyInfo{
+		Path:       srcPath,
+		Exists:     true,
+		IsDir:      stat.Mode.IsDir(),
+		RebaseName: rebaseName,
+	}
+
+	preArchive := content
+	if len(srcInfo.RebaseName) != 0 {
+		_, srcBase := archive.SplitPathDirEntry(srcInfo.Path)
+		preArchive = archive.RebaseArchiveEntries(content, srcBase, srcInfo.RebaseName)
+	}
+	// See comments in the implementation of `archive.CopyTo` for exactly what
+	// goes into deciding how and whether the source archive needs to be
+	// altered for the correct copy behavior.
+	return archive.CopyTo(preArchive, srcInfo, dstPath)
+}
+
+func (cli *DockerCli) copyToContainer(srcPath, dstContainer, dstPath string, cpParam *cpConfig) (err error) {
+	if srcPath != "-" {
+		// Get an absolute source path.
+		srcPath, err = resolveLocalPath(srcPath)
+		if err != nil {
+			return err
+		}
+	}
+
+	// In order to get the copy behavior right, we need to know information
+	// about both the source and destination. The API is a simple tar
+	// archive/extract API but we can use the stat info header about the
+	// destination to be more informed about exactly what the destination is.
+
+	// Prepare destination copy info by stat-ing the container path.
+	dstInfo := archive.CopyInfo{Path: dstPath}
+	dstStat, err := cli.statContainerPath(dstContainer, dstPath)
+
+	// If the destination is a symbolic link, we should evaluate it.
+	if err == nil && dstStat.Mode&os.ModeSymlink != 0 {
+		linkTarget := dstStat.LinkTarget
+		if !system.IsAbs(linkTarget) {
+			// Join with the parent directory.
+			dstParent, _ := archive.SplitPathDirEntry(dstPath)
+			linkTarget = filepath.Join(dstParent, linkTarget)
+		}
+
+		dstInfo.Path = linkTarget
+		dstStat, err = cli.statContainerPath(dstContainer, linkTarget)
+	}
+
+	// Ignore any error and assume that the parent directory of the destination
+	// path exists, in which case the copy may still succeed.
If there is any + // type of conflict (e.g., non-directory overwriting an existing directory + // or vice versa) the extraction will fail. If the destination simply did + // not exist, but the parent directory does, the extraction will still + // succeed. + if err == nil { + dstInfo.Exists, dstInfo.IsDir = true, dstStat.Mode.IsDir() + } + + var ( + content io.Reader + resolvedDstPath string + ) + + if srcPath == "-" { + // Use STDIN. + content = os.Stdin + resolvedDstPath = dstInfo.Path + if !dstInfo.IsDir { + return fmt.Errorf("destination %q must be a directory", fmt.Sprintf("%s:%s", dstContainer, dstPath)) + } + } else { + // Prepare source copy info. + srcInfo, err := archive.CopyInfoSourcePath(srcPath, cpParam.followLink) + if err != nil { + return err + } + + srcArchive, err := archive.TarResource(srcInfo) + if err != nil { + return err + } + defer srcArchive.Close() + + // With the stat info about the local source as well as the + // destination, we have enough information to know whether we need to + // alter the archive that we upload so that when the server extracts + // it to the specified directory in the container we get the desired + // copy behavior. + + // See comments in the implementation of `archive.PrepareArchiveCopy` + // for exactly what goes into deciding how and whether the source + // archive needs to be altered for the correct copy behavior when it is + // extracted. This function also infers from the source and destination + // info which directory to extract to, which may be the parent of the + // destination that the user specified. + dstDir, preparedArchive, err := archive.PrepareArchiveCopy(srcArchive, srcInfo, dstInfo) + if err != nil { + return err + } + defer preparedArchive.Close() + + resolvedDstPath = dstDir + content = preparedArchive + } + + options := types.CopyToContainerOptions{ + ContainerID: dstContainer, + Path: resolvedDstPath, + Content: content, + AllowOverwriteDirWithFile: false, + } + + return cli.client.CopyToContainer(context.Background(), options) +} diff --git a/vendor/github.com/docker/docker/api/client/create.go b/vendor/github.com/docker/docker/api/client/create.go new file mode 100644 index 00000000..176bd8e9 --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/create.go @@ -0,0 +1,164 @@ +package client + +import ( + "fmt" + "io" + "os" + + "golang.org/x/net/context" + + Cli "github.com/docker/docker/cli" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + runconfigopts "github.com/docker/docker/runconfig/opts" + "github.com/docker/engine-api/client" + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/container" + networktypes "github.com/docker/engine-api/types/network" +) + +func (cli *DockerCli) pullImage(image string) error { + return cli.pullImageCustomOut(image, cli.out) +} + +func (cli *DockerCli) pullImageCustomOut(image string, out io.Writer) error { + ref, err := reference.ParseNamed(image) + if err != nil { + return err + } + + var tag string + switch x := reference.WithDefaultTag(ref).(type) { + case reference.Canonical: + tag = x.Digest().String() + case reference.NamedTagged: + tag = x.Tag() + } + + // Resolve the Repository name from fqn to RepositoryInfo + repoInfo, err := registry.ParseRepositoryInfo(ref) + if err != nil { + return err + } + + authConfig := cli.resolveAuthConfig(repoInfo.Index) + encodedAuth, err := encodeAuthToBase64(authConfig) + if err != nil { + return err + } + + options := 
types.ImageCreateOptions{ + Parent: ref.Name(), + Tag: tag, + RegistryAuth: encodedAuth, + } + + responseBody, err := cli.client.ImageCreate(context.Background(), options) + if err != nil { + return err + } + defer responseBody.Close() + + return jsonmessage.DisplayJSONMessagesStream(responseBody, out, cli.outFd, cli.isTerminalOut, nil) +} + +type cidFile struct { + path string + file *os.File + written bool +} + +func newCIDFile(path string) (*cidFile, error) { + if _, err := os.Stat(path); err == nil { + return nil, fmt.Errorf("Container ID file found, make sure the other container isn't running or delete %s", path) + } + + f, err := os.Create(path) + if err != nil { + return nil, fmt.Errorf("Failed to create the container ID file: %s", err) + } + + return &cidFile{path: path, file: f}, nil +} + +func (cli *DockerCli) createContainer(config *container.Config, hostConfig *container.HostConfig, networkingConfig *networktypes.NetworkingConfig, cidfile, name string) (*types.ContainerCreateResponse, error) { + var containerIDFile *cidFile + if cidfile != "" { + var err error + if containerIDFile, err = newCIDFile(cidfile); err != nil { + return nil, err + } + defer containerIDFile.Close() + } + + _, ref, err := reference.ParseIDOrReference(config.Image) + if err != nil { + return nil, err + } + if ref != nil { + ref = reference.WithDefaultTag(ref) + } + + //create the container + response, err := cli.client.ContainerCreate(context.Background(), config, hostConfig, networkingConfig, name) + + //if image not found try to pull it + if err != nil { + if client.IsErrImageNotFound(err) && ref != nil { + fmt.Fprintf(cli.err, "Unable to find image '%s' locally\n", ref.String()) + + // we don't want to write to stdout anything apart from container.ID + if err = cli.pullImageCustomOut(config.Image, cli.err); err != nil { + return nil, err + } + // Retry + var retryErr error + response, retryErr = cli.client.ContainerCreate(context.Background(), config, hostConfig, networkingConfig, name) + if retryErr != nil { + return nil, retryErr + } + } else { + return nil, err + } + } + + for _, warning := range response.Warnings { + fmt.Fprintf(cli.err, "WARNING: %s\n", warning) + } + if containerIDFile != nil { + if err = containerIDFile.Write(response.ID); err != nil { + return nil, err + } + } + return &response, nil +} + +// CmdCreate creates a new container from a given image. +// +// Usage: docker create [OPTIONS] IMAGE [COMMAND] [ARG...] 
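+//
+// For example (image and name are illustrative):
+//
+//	docker create --name web nginx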
+func (cli *DockerCli) CmdCreate(args ...string) error { + cmd := Cli.Subcmd("create", []string{"IMAGE [COMMAND] [ARG...]"}, Cli.DockerCommands["create"].Description, true) + + // These are flags not stored in Config/HostConfig + var ( + flName = cmd.String([]string{"-name"}, "", "Assign a name to the container") + ) + + config, hostConfig, networkingConfig, cmd, err := runconfigopts.Parse(cmd, args) + + if err != nil { + cmd.ReportError(err.Error(), true) + os.Exit(1) + } + if config.Image == "" { + cmd.Usage() + return nil + } + response, err := cli.createContainer(config, hostConfig, networkingConfig, hostConfig.ContainerIDFile, *flName) + if err != nil { + return err + } + fmt.Fprintf(cli.out, "%s\n", response.ID) + return nil +} diff --git a/vendor/github.com/docker/docker/api/client/diff.go b/vendor/github.com/docker/docker/api/client/diff.go new file mode 100644 index 00000000..e17768fd --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/diff.go @@ -0,0 +1,49 @@ +package client + +import ( + "fmt" + + "golang.org/x/net/context" + + Cli "github.com/docker/docker/cli" + "github.com/docker/docker/pkg/archive" + flag "github.com/docker/docker/pkg/mflag" +) + +// CmdDiff shows changes on a container's filesystem. +// +// Each changed file is printed on a separate line, prefixed with a single +// character that indicates the status of the file: C (modified), A (added), +// or D (deleted). +// +// Usage: docker diff CONTAINER +func (cli *DockerCli) CmdDiff(args ...string) error { + cmd := Cli.Subcmd("diff", []string{"CONTAINER"}, Cli.DockerCommands["diff"].Description, true) + cmd.Require(flag.Exact, 1) + + cmd.ParseFlags(args, true) + + if cmd.Arg(0) == "" { + return fmt.Errorf("Container name cannot be empty") + } + + changes, err := cli.client.ContainerDiff(context.Background(), cmd.Arg(0)) + if err != nil { + return err + } + + for _, change := range changes { + var kind string + switch change.Kind { + case archive.ChangeModify: + kind = "C" + case archive.ChangeAdd: + kind = "A" + case archive.ChangeDelete: + kind = "D" + } + fmt.Fprintf(cli.out, "%s %s\n", kind, change.Path) + } + + return nil +} diff --git a/vendor/github.com/docker/docker/api/client/events.go b/vendor/github.com/docker/docker/api/client/events.go new file mode 100644 index 00000000..d2408c19 --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/events.go @@ -0,0 +1,146 @@ +package client + +import ( + "encoding/json" + "fmt" + "io" + "sort" + "strings" + "sync" + "time" + + "golang.org/x/net/context" + + "github.com/Sirupsen/logrus" + Cli "github.com/docker/docker/cli" + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/jsonlog" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/engine-api/types" + eventtypes "github.com/docker/engine-api/types/events" + "github.com/docker/engine-api/types/filters" +) + +// CmdEvents prints a live stream of real time events from the server. 
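+//
+// For example (the timestamp and filter are illustrative):
+//
+//	docker events --since 2016-01-01 --filter event=start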
+//
+// Usage: docker events [OPTIONS]
+func (cli *DockerCli) CmdEvents(args ...string) error {
+	cmd := Cli.Subcmd("events", nil, Cli.DockerCommands["events"].Description, true)
+	since := cmd.String([]string{"-since"}, "", "Show all events created since timestamp")
+	until := cmd.String([]string{"-until"}, "", "Stream events until this timestamp")
+	flFilter := opts.NewListOpts(nil)
+	cmd.Var(&flFilter, []string{"f", "-filter"}, "Filter output based on conditions provided")
+	cmd.Require(flag.Exact, 0)
+
+	cmd.ParseFlags(args, true)
+
+	eventFilterArgs := filters.NewArgs()
+
+	// Consolidate all filter flags, and sanity check them early.
+	// They'll get processed in the daemon/server.
+	for _, f := range flFilter.GetAll() {
+		var err error
+		eventFilterArgs, err = filters.ParseFlag(f, eventFilterArgs)
+		if err != nil {
+			return err
+		}
+	}
+
+	options := types.EventsOptions{
+		Since:   *since,
+		Until:   *until,
+		Filters: eventFilterArgs,
+	}
+
+	responseBody, err := cli.client.Events(context.Background(), options)
+	if err != nil {
+		return err
+	}
+	defer responseBody.Close()
+
+	return streamEvents(responseBody, cli.out)
+}
+
+// streamEvents decodes and prints the incoming events to the provided output.
+func streamEvents(input io.Reader, output io.Writer) error {
+	return decodeEvents(input, func(event eventtypes.Message, err error) error {
+		if err != nil {
+			return err
+		}
+		printOutput(event, output)
+		return nil
+	})
+}
+
+type eventProcessor func(event eventtypes.Message, err error) error
+
+func decodeEvents(input io.Reader, ep eventProcessor) error {
+	dec := json.NewDecoder(input)
+	for {
+		var event eventtypes.Message
+		err := dec.Decode(&event)
+		if err != nil && err == io.EOF {
+			break
+		}
+
+		if procErr := ep(event, err); procErr != nil {
+			return procErr
+		}
+	}
+	return nil
+}
+
+// printOutput prints all types of event information.
+// Each output includes the event type, actor id, name and action.
+// Actor attributes are printed at the end if the actor has any.
+func printOutput(event eventtypes.Message, output io.Writer) {
+	if event.TimeNano != 0 {
+		fmt.Fprintf(output, "%s ", time.Unix(0, event.TimeNano).Format(jsonlog.RFC3339NanoFixed))
+	} else if event.Time != 0 {
+		fmt.Fprintf(output, "%s ", time.Unix(event.Time, 0).Format(jsonlog.RFC3339NanoFixed))
+	}
+
+	fmt.Fprintf(output, "%s %s %s", event.Type, event.Action, event.Actor.ID)
+
+	if len(event.Actor.Attributes) > 0 {
+		var attrs []string
+		var keys []string
+		for k := range event.Actor.Attributes {
+			keys = append(keys, k)
+		}
+		sort.Strings(keys)
+		for _, k := range keys {
+			v := event.Actor.Attributes[k]
+			attrs = append(attrs, fmt.Sprintf("%s=%s", k, v))
+		}
+		fmt.Fprintf(output, " (%s)", strings.Join(attrs, ", "))
+	}
+	fmt.Fprint(output, "\n")
+}
+
+type eventHandler struct {
+	handlers map[string]func(eventtypes.Message)
+	mu       sync.Mutex
+}
+
+func (w *eventHandler) Handle(action string, h func(eventtypes.Message)) {
+	w.mu.Lock()
+	w.handlers[action] = h
+	w.mu.Unlock()
+}
+
+// Watch ranges over the passed in event chan and processes the events based on the
+// handlers created for a given action.
+// To stop watching, close the event chan.
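+//
+// A usage sketch (eventCh and the handler body are illustrative):
+//
+//	w := &eventHandler{handlers: map[string]func(eventtypes.Message){}}
+//	w.Handle("start", func(e eventtypes.Message) { fmt.Println(e.Actor.ID) })
+//	go w.Watch(eventCh)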
+func (w *eventHandler) Watch(c <-chan eventtypes.Message) { + for e := range c { + w.mu.Lock() + h, exists := w.handlers[e.Action] + w.mu.Unlock() + if !exists { + continue + } + logrus.Debugf("event handler: received event: %v", e) + go h(e) + } +} diff --git a/vendor/github.com/docker/docker/api/client/exec.go b/vendor/github.com/docker/docker/api/client/exec.go new file mode 100644 index 00000000..520c3a38 --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/exec.go @@ -0,0 +1,166 @@ +package client + +import ( + "fmt" + "io" + + "golang.org/x/net/context" + + "github.com/Sirupsen/logrus" + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/promise" + "github.com/docker/engine-api/types" +) + +// CmdExec runs a command in a running container. +// +// Usage: docker exec [OPTIONS] CONTAINER COMMAND [ARG...] +func (cli *DockerCli) CmdExec(args ...string) error { + cmd := Cli.Subcmd("exec", []string{"CONTAINER COMMAND [ARG...]"}, Cli.DockerCommands["exec"].Description, true) + detachKeys := cmd.String([]string{"-detach-keys"}, "", "Override the key sequence for detaching a container") + + execConfig, err := ParseExec(cmd, args) + // just in case the ParseExec does not exit + if execConfig.Container == "" || err != nil { + return Cli.StatusError{StatusCode: 1} + } + + if *detachKeys != "" { + cli.configFile.DetachKeys = *detachKeys + } + + // Send client escape keys + execConfig.DetachKeys = cli.configFile.DetachKeys + + response, err := cli.client.ContainerExecCreate(context.Background(), *execConfig) + if err != nil { + return err + } + + execID := response.ID + if execID == "" { + fmt.Fprintf(cli.out, "exec ID empty") + return nil + } + + //Temp struct for execStart so that we don't need to transfer all the execConfig + if !execConfig.Detach { + if err := cli.CheckTtyInput(execConfig.AttachStdin, execConfig.Tty); err != nil { + return err + } + } else { + execStartCheck := types.ExecStartCheck{ + Detach: execConfig.Detach, + Tty: execConfig.Tty, + } + + if err := cli.client.ContainerExecStart(context.Background(), execID, execStartCheck); err != nil { + return err + } + // For now don't print this - wait for when we support exec wait() + // fmt.Fprintf(cli.out, "%s\n", execID) + return nil + } + + // Interactive exec requested. 
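+ // Illustrative note (not part of the vendored source; the container name "web" is hypothetical): `docker exec -it web sh` reaches this point with AttachStdin, AttachStdout and Tty set, so cli.in feeds the exec's stdin and, because a TTY has no separate error stream, stderr is pointed at cli.out below.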
+ var ( + out, stderr io.Writer + in io.ReadCloser + errCh chan error + ) + + if execConfig.AttachStdin { + in = cli.in + } + if execConfig.AttachStdout { + out = cli.out + } + if execConfig.AttachStderr { + if execConfig.Tty { + stderr = cli.out + } else { + stderr = cli.err + } + } + + resp, err := cli.client.ContainerExecAttach(context.Background(), execID, *execConfig) + if err != nil { + return err + } + defer resp.Close() + if in != nil && execConfig.Tty { + if err := cli.setRawTerminal(); err != nil { + return err + } + defer cli.restoreTerminal(in) + } + errCh = promise.Go(func() error { + return cli.holdHijackedConnection(execConfig.Tty, in, out, stderr, resp) + }) + + if execConfig.Tty && cli.isTerminalIn { + if err := cli.monitorTtySize(execID, true); err != nil { + fmt.Fprintf(cli.err, "Error monitoring TTY size: %s\n", err) + } + } + + if err := <-errCh; err != nil { + logrus.Debugf("Error hijack: %s", err) + return err + } + + var status int + if _, status, err = getExecExitCode(cli, execID); err != nil { + return err + } + + if status != 0 { + return Cli.StatusError{StatusCode: status} + } + + return nil +} + +// ParseExec parses the specified args for the specified command and generates +// an ExecConfig from it. +// If the minimal number of specified args is not right or if specified args are +// not valid, it will return an error. +func ParseExec(cmd *flag.FlagSet, args []string) (*types.ExecConfig, error) { + var ( + flStdin = cmd.Bool([]string{"i", "-interactive"}, false, "Keep STDIN open even if not attached") + flTty = cmd.Bool([]string{"t", "-tty"}, false, "Allocate a pseudo-TTY") + flDetach = cmd.Bool([]string{"d", "-detach"}, false, "Detached mode: run command in the background") + flUser = cmd.String([]string{"u", "-user"}, "", "Username or UID (format: <name|uid>[:<group|gid>])") + flPrivileged = cmd.Bool([]string{"-privileged"}, false, "Give extended privileges to the command") + execCmd []string + container string + ) + cmd.Require(flag.Min, 2) + if err := cmd.ParseFlags(args, true); err != nil { + return nil, err + } + container = cmd.Arg(0) + parsedArgs := cmd.Args() + execCmd = parsedArgs[1:] + + execConfig := &types.ExecConfig{ + User: *flUser, + Privileged: *flPrivileged, + Tty: *flTty, + Cmd: execCmd, + Container: container, + Detach: *flDetach, + } + + // If -d is not set, attach to everything by default + if !*flDetach { + execConfig.AttachStdout = true + execConfig.AttachStderr = true + if *flStdin { + execConfig.AttachStdin = true + } + } + + return execConfig, nil +} diff --git a/vendor/github.com/docker/docker/api/client/export.go b/vendor/github.com/docker/docker/api/client/export.go new file mode 100644 index 00000000..a1d3ebe7 --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/export.go @@ -0,0 +1,42 @@ +package client + +import ( + "errors" + "io" + + "golang.org/x/net/context" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" +) + +// CmdExport exports a filesystem as a tar archive. + +// +// The tar archive is streamed to STDOUT by default or written to a file. +// +// Usage: docker export [OPTIONS] CONTAINER +func (cli *DockerCli) CmdExport(args ...string) error { + cmd := Cli.Subcmd("export", []string{"CONTAINER"}, Cli.DockerCommands["export"].Description, true) + outfile := cmd.String([]string{"o", "-output"}, "", "Write to a file, instead of STDOUT") + cmd.Require(flag.Exact, 1) + + cmd.ParseFlags(args, true) + + if *outfile == "" && cli.isTerminalOut { + return errors.New("Cowardly refusing to save to a terminal.
Use the -o flag or redirect.") + } + + responseBody, err := cli.client.ContainerExport(context.Background(), cmd.Arg(0)) + if err != nil { + return err + } + defer responseBody.Close() + + if *outfile == "" { + _, err := io.Copy(cli.out, responseBody) + return err + } + + return copyToFile(*outfile, responseBody) + +} diff --git a/vendor/github.com/docker/docker/api/client/formatter/custom.go b/vendor/github.com/docker/docker/api/client/formatter/custom.go new file mode 100644 index 00000000..2bb26a3d --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/formatter/custom.go @@ -0,0 +1,242 @@ +package formatter + +import ( + "fmt" + "strconv" + "strings" + "time" + + "github.com/docker/docker/api" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/stringutils" + "github.com/docker/engine-api/types" + "github.com/docker/go-units" +) + +const ( + tableKey = "table" + + containerIDHeader = "CONTAINER ID" + imageHeader = "IMAGE" + namesHeader = "NAMES" + commandHeader = "COMMAND" + createdSinceHeader = "CREATED" + createdAtHeader = "CREATED AT" + runningForHeader = "CREATED" + statusHeader = "STATUS" + portsHeader = "PORTS" + sizeHeader = "SIZE" + labelsHeader = "LABELS" + imageIDHeader = "IMAGE ID" + repositoryHeader = "REPOSITORY" + tagHeader = "TAG" + digestHeader = "DIGEST" + mountsHeader = "MOUNTS" +) + +type containerContext struct { + baseSubContext + trunc bool + c types.Container +} + +func (c *containerContext) ID() string { + c.addHeader(containerIDHeader) + if c.trunc { + return stringid.TruncateID(c.c.ID) + } + return c.c.ID +} + +func (c *containerContext) Names() string { + c.addHeader(namesHeader) + names := stripNamePrefix(c.c.Names) + if c.trunc { + for _, name := range names { + if len(strings.Split(name, "/")) == 1 { + names = []string{name} + break + } + } + } + return strings.Join(names, ",") +} + +func (c *containerContext) Image() string { + c.addHeader(imageHeader) + if c.c.Image == "" { + return "" + } + if c.trunc { + if trunc := stringid.TruncateID(c.c.ImageID); trunc == stringid.TruncateID(c.c.Image) { + return trunc + } + } + return c.c.Image +} + +func (c *containerContext) Command() string { + c.addHeader(commandHeader) + command := c.c.Command + if c.trunc { + command = stringutils.Truncate(command, 20) + } + return strconv.Quote(command) +} + +func (c *containerContext) CreatedAt() string { + c.addHeader(createdAtHeader) + return time.Unix(int64(c.c.Created), 0).String() +} + +func (c *containerContext) RunningFor() string { + c.addHeader(runningForHeader) + createdAt := time.Unix(int64(c.c.Created), 0) + return units.HumanDuration(time.Now().UTC().Sub(createdAt)) +} + +func (c *containerContext) Ports() string { + c.addHeader(portsHeader) + return api.DisplayablePorts(c.c.Ports) +} + +func (c *containerContext) Status() string { + c.addHeader(statusHeader) + return c.c.Status +} + +func (c *containerContext) Size() string { + c.addHeader(sizeHeader) + srw := units.HumanSize(float64(c.c.SizeRw)) + sv := units.HumanSize(float64(c.c.SizeRootFs)) + + sf := srw + if c.c.SizeRootFs > 0 { + sf = fmt.Sprintf("%s (virtual %s)", srw, sv) + } + return sf +} + +func (c *containerContext) Labels() string { + c.addHeader(labelsHeader) + if c.c.Labels == nil { + return "" + } + + var joinLabels []string + for k, v := range c.c.Labels { + joinLabels = append(joinLabels, fmt.Sprintf("%s=%s", k, v)) + } + return strings.Join(joinLabels, ",") +} + +func (c *containerContext) Label(name string) string { + n := strings.Split(name, ".") + r := 
strings.NewReplacer("-", " ", "_", " ") + h := r.Replace(n[len(n)-1]) + + c.addHeader(h) + + if c.c.Labels == nil { + return "" + } + return c.c.Labels[name] +} + +func (c *containerContext) Mounts() string { + c.addHeader(mountsHeader) + + var name string + var mounts []string + for _, m := range c.c.Mounts { + if m.Name == "" { + name = m.Source + } else { + name = m.Name + } + if c.trunc { + name = stringutils.Truncate(name, 15) + } + mounts = append(mounts, name) + } + return strings.Join(mounts, ",") +} + +type imageContext struct { + baseSubContext + trunc bool + i types.Image + repo string + tag string + digest string +} + +func (c *imageContext) ID() string { + c.addHeader(imageIDHeader) + if c.trunc { + return stringid.TruncateID(c.i.ID) + } + return c.i.ID +} + +func (c *imageContext) Repository() string { + c.addHeader(repositoryHeader) + return c.repo +} + +func (c *imageContext) Tag() string { + c.addHeader(tagHeader) + return c.tag +} + +func (c *imageContext) Digest() string { + c.addHeader(digestHeader) + return c.digest +} + +func (c *imageContext) CreatedSince() string { + c.addHeader(createdSinceHeader) + createdAt := time.Unix(int64(c.i.Created), 0) + return units.HumanDuration(time.Now().UTC().Sub(createdAt)) +} + +func (c *imageContext) CreatedAt() string { + c.addHeader(createdAtHeader) + return time.Unix(int64(c.i.Created), 0).String() +} + +func (c *imageContext) Size() string { + c.addHeader(sizeHeader) + return units.HumanSize(float64(c.i.Size)) +} + +type subContext interface { + fullHeader() string + addHeader(header string) +} + +type baseSubContext struct { + header []string +} + +func (c *baseSubContext) fullHeader() string { + if c.header == nil { + return "" + } + return strings.Join(c.header, "\t") +} + +func (c *baseSubContext) addHeader(header string) { + if c.header == nil { + c.header = []string{} + } + c.header = append(c.header, strings.ToUpper(header)) +} + +func stripNamePrefix(ss []string) []string { + for i, s := range ss { + ss[i] = s[1:] + } + + return ss +} diff --git a/vendor/github.com/docker/docker/api/client/formatter/formatter.go b/vendor/github.com/docker/docker/api/client/formatter/formatter.go new file mode 100644 index 00000000..bc3d50c9 --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/formatter/formatter.go @@ -0,0 +1,255 @@ +package formatter + +import ( + "bytes" + "fmt" + "io" + "strings" + "text/tabwriter" + "text/template" + + "github.com/docker/docker/reference" + "github.com/docker/docker/utils/templates" + "github.com/docker/engine-api/types" +) + +const ( + tableFormatKey = "table" + rawFormatKey = "raw" + + defaultContainerTableFormat = "table {{.ID}}\t{{.Image}}\t{{.Command}}\t{{.RunningFor}} ago\t{{.Status}}\t{{.Ports}}\t{{.Names}}" + defaultImageTableFormat = "table {{.Repository}}\t{{.Tag}}\t{{.ID}}\t{{.CreatedSince}} ago\t{{.Size}}" + defaultImageTableFormatWithDigest = "table {{.Repository}}\t{{.Tag}}\t{{.Digest}}\t{{.ID}}\t{{.CreatedSince}} ago\t{{.Size}}" + defaultQuietFormat = "{{.ID}}" +) + +// Context contains information required by the formatter to print the output as desired. +type Context struct { + // Output is the output stream to which the formatted string is written. + Output io.Writer + // Format is used to choose raw, table or custom format for the output. + Format string + // Quiet when set to true will simply print minimal information. + Quiet bool + // Trunc when set to true will truncate the output of certain fields such as Container ID. 
+ Trunc bool + + // internal element + table bool + finalFormat string + header string + buffer *bytes.Buffer +} + +func (c *Context) preformat() { + c.finalFormat = c.Format + + if strings.HasPrefix(c.Format, tableKey) { + c.table = true + c.finalFormat = c.finalFormat[len(tableKey):] + } + + c.finalFormat = strings.Trim(c.finalFormat, " ") + r := strings.NewReplacer(`\t`, "\t", `\n`, "\n") + c.finalFormat = r.Replace(c.finalFormat) +} + +func (c *Context) parseFormat() (*template.Template, error) { + tmpl, err := templates.Parse(c.finalFormat) + if err != nil { + c.buffer.WriteString(fmt.Sprintf("Template parsing error: %v\n", err)) + c.buffer.WriteTo(c.Output) + } + return tmpl, err +} + +func (c *Context) postformat(tmpl *template.Template, subContext subContext) { + if c.table { + if len(c.header) == 0 { + // if we still don't have a header, we didn't have any containers so we need to fake it to get the right headers from the template + tmpl.Execute(bytes.NewBufferString(""), subContext) + c.header = subContext.fullHeader() + } + + t := tabwriter.NewWriter(c.Output, 20, 1, 3, ' ', 0) + t.Write([]byte(c.header)) + t.Write([]byte("\n")) + c.buffer.WriteTo(t) + t.Flush() + } else { + c.buffer.WriteTo(c.Output) + } +} + +func (c *Context) contextFormat(tmpl *template.Template, subContext subContext) error { + if err := tmpl.Execute(c.buffer, subContext); err != nil { + c.buffer = bytes.NewBufferString(fmt.Sprintf("Template parsing error: %v\n", err)) + c.buffer.WriteTo(c.Output) + return err + } + if c.table && len(c.header) == 0 { + c.header = subContext.fullHeader() + } + c.buffer.WriteString("\n") + return nil +} + +// ContainerContext contains container-specific information required by the formatter, encapsulating a Context struct. +type ContainerContext struct { + Context + // Size when set to true will display the size of the output. + Size bool + // Containers + Containers []types.Container +} + +// ImageContext contains image-specific information required by the formatter, encapsulating a Context struct.
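+// +// For example (illustrative only), a user-supplied template such as +// +// docker images --format "table {{.Repository}}\t{{.Tag}}\t{{.Size}}" +// +// flows through this context: preformat strips the leading "table" keyword and unescapes \t, and each image is then executed against the remaining template.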
+type ImageContext struct { + Context + Digest bool + // Images + Images []types.Image +} + +func (ctx ContainerContext) Write() { + switch ctx.Format { + case tableFormatKey: + ctx.Format = defaultContainerTableFormat + if ctx.Quiet { + ctx.Format = defaultQuietFormat + } + case rawFormatKey: + if ctx.Quiet { + ctx.Format = `container_id: {{.ID}}` + } else { + ctx.Format = `container_id: {{.ID}} +image: {{.Image}} +command: {{.Command}} +created_at: {{.CreatedAt}} +status: {{.Status}} +names: {{.Names}} +labels: {{.Labels}} +ports: {{.Ports}} +` + if ctx.Size { + ctx.Format += `size: {{.Size}} +` + } + } + } + + ctx.buffer = bytes.NewBufferString("") + ctx.preformat() + if ctx.table && ctx.Size { + ctx.finalFormat += "\t{{.Size}}" + } + + tmpl, err := ctx.parseFormat() + if err != nil { + return + } + + for _, container := range ctx.Containers { + containerCtx := &containerContext{ + trunc: ctx.Trunc, + c: container, + } + err = ctx.contextFormat(tmpl, containerCtx) + if err != nil { + return + } + } + + ctx.postformat(tmpl, &containerContext{}) +} + +func (ctx ImageContext) Write() { + switch ctx.Format { + case tableFormatKey: + ctx.Format = defaultImageTableFormat + if ctx.Digest { + ctx.Format = defaultImageTableFormatWithDigest + } + if ctx.Quiet { + ctx.Format = defaultQuietFormat + } + case rawFormatKey: + if ctx.Quiet { + ctx.Format = `image_id: {{.ID}}` + } else { + if ctx.Digest { + ctx.Format = `repository: {{ .Repository }} +tag: {{.Tag}} +digest: {{.Digest}} +image_id: {{.ID}} +created_at: {{.CreatedAt}} +virtual_size: {{.Size}} +` + } else { + ctx.Format = `repository: {{ .Repository }} +tag: {{.Tag}} +image_id: {{.ID}} +created_at: {{.CreatedAt}} +virtual_size: {{.Size}} +` + } + } + } + + ctx.buffer = bytes.NewBufferString("") + ctx.preformat() + if ctx.table && ctx.Digest && !strings.Contains(ctx.Format, "{{.Digest}}") { + ctx.finalFormat += "\t{{.Digest}}" + } + + tmpl, err := ctx.parseFormat() + if err != nil { + return + } + + for _, image := range ctx.Images { + + repoTags := image.RepoTags + repoDigests := image.RepoDigests + + if len(repoTags) == 1 && repoTags[0] == "<none>:<none>" && len(repoDigests) == 1 && repoDigests[0] == "<none>@<none>" { + // dangling image - clear out either repoTags or repoDigests so we only show it once below + repoDigests = []string{} + } + // combine the tags and digests lists + tagsAndDigests := append(repoTags, repoDigests...)
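+ // Illustrative (not part of the vendored source): an entry such as "ubuntu:14.04" is parsed below into repo "ubuntu" and tag "14.04" (reference.NamedTagged), while a digest reference like "ubuntu@sha256:<hex>" fills in digest instead (reference.Canonical).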
+ for _, repoAndRef := range tagsAndDigests { + repo := "<none>" + tag := "<none>" + digest := "<none>" + + if !strings.HasPrefix(repoAndRef, "<none>") { + ref, err := reference.ParseNamed(repoAndRef) + if err != nil { + continue + } + repo = ref.Name() + + switch x := ref.(type) { + case reference.Canonical: + digest = x.Digest().String() + case reference.NamedTagged: + tag = x.Tag() + } + } + imageCtx := &imageContext{ + trunc: ctx.Trunc, + i: image, + repo: repo, + tag: tag, + digest: digest, + } + err = ctx.contextFormat(tmpl, imageCtx) + if err != nil { + return + } + } + } + + ctx.postformat(tmpl, &imageContext{}) +} diff --git a/vendor/github.com/docker/docker/api/client/hijack.go b/vendor/github.com/docker/docker/api/client/hijack.go new file mode 100644 index 00000000..4c80fe1c --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/hijack.go @@ -0,0 +1,56 @@ +package client + +import ( + "io" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/stdcopy" + "github.com/docker/engine-api/types" +) + +func (cli *DockerCli) holdHijackedConnection(tty bool, inputStream io.ReadCloser, outputStream, errorStream io.Writer, resp types.HijackedResponse) error { + var err error + receiveStdout := make(chan error, 1) + if outputStream != nil || errorStream != nil { + go func() { + // When TTY is ON, use regular copy + if tty && outputStream != nil { + _, err = io.Copy(outputStream, resp.Reader) + } else { + _, err = stdcopy.StdCopy(outputStream, errorStream, resp.Reader) + } + logrus.Debugf("[hijack] End of stdout") + receiveStdout <- err + }() + } + + stdinDone := make(chan struct{}) + go func() { + if inputStream != nil { + io.Copy(resp.Conn, inputStream) + logrus.Debugf("[hijack] End of stdin") + } + + if err := resp.CloseWrite(); err != nil { + logrus.Debugf("Couldn't send EOF: %s", err) + } + close(stdinDone) + }() + + select { + case err := <-receiveStdout: + if err != nil { + logrus.Debugf("Error receiveStdout: %s", err) + return err + } + case <-stdinDone: + if outputStream != nil || errorStream != nil { + if err := <-receiveStdout; err != nil { + logrus.Debugf("Error receiveStdout: %s", err) + return err + } + } + } + + return nil +} diff --git a/vendor/github.com/docker/docker/api/client/history.go b/vendor/github.com/docker/docker/api/client/history.go new file mode 100644 index 00000000..25bb4157 --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/history.go @@ -0,0 +1,76 @@ +package client + +import ( + "fmt" + "strconv" + "strings" + "text/tabwriter" + "time" + + "golang.org/x/net/context" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/stringutils" + "github.com/docker/go-units" +) + +// CmdHistory shows the history of an image.
+// +// Usage: docker history [OPTIONS] IMAGE +func (cli *DockerCli) CmdHistory(args ...string) error { + cmd := Cli.Subcmd("history", []string{"IMAGE"}, Cli.DockerCommands["history"].Description, true) + human := cmd.Bool([]string{"H", "-human"}, true, "Print sizes and dates in human-readable format") + quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only show numeric IDs") + noTrunc := cmd.Bool([]string{"-no-trunc"}, false, "Don't truncate output") + cmd.Require(flag.Exact, 1) + + cmd.ParseFlags(args, true) + + history, err := cli.client.ImageHistory(context.Background(), cmd.Arg(0)) + if err != nil { + return err + } + + w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0) + + if *quiet { + for _, entry := range history { + if *noTrunc { + fmt.Fprintf(w, "%s\n", entry.ID) + } else { + fmt.Fprintf(w, "%s\n", stringid.TruncateID(entry.ID)) + } + } + w.Flush() + return nil + } + + var imageID string + var createdBy string + var created string + var size string + + fmt.Fprintln(w, "IMAGE\tCREATED\tCREATED BY\tSIZE\tCOMMENT") + for _, entry := range history { + imageID = entry.ID + createdBy = strings.Replace(entry.CreatedBy, "\t", " ", -1) + if !*noTrunc { + createdBy = stringutils.Truncate(createdBy, 45) + imageID = stringid.TruncateID(entry.ID) + } + + if *human { + created = units.HumanDuration(time.Now().UTC().Sub(time.Unix(entry.Created, 0))) + " ago" + size = units.HumanSize(float64(entry.Size)) + } else { + created = time.Unix(entry.Created, 0).Format(time.RFC3339) + size = strconv.FormatInt(entry.Size, 10) + } + + fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\n", imageID, created, createdBy, size, entry.Comment) + } + w.Flush() + return nil +} diff --git a/vendor/github.com/docker/docker/api/client/images.go b/vendor/github.com/docker/docker/api/client/images.go new file mode 100644 index 00000000..4840b63d --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/images.go @@ -0,0 +1,81 @@ +package client + +import ( + "golang.org/x/net/context" + + "github.com/docker/docker/api/client/formatter" + Cli "github.com/docker/docker/cli" + "github.com/docker/docker/opts" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/filters" +) + +// CmdImages lists the images in a specified repository, or all top-level images if no repository is specified. +// +// Usage: docker images [OPTIONS] [REPOSITORY] +func (cli *DockerCli) CmdImages(args ...string) error { + cmd := Cli.Subcmd("images", []string{"[REPOSITORY[:TAG]]"}, Cli.DockerCommands["images"].Description, true) + quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only show numeric IDs") + all := cmd.Bool([]string{"a", "-all"}, false, "Show all images (default hides intermediate images)") + noTrunc := cmd.Bool([]string{"-no-trunc"}, false, "Don't truncate output") + showDigests := cmd.Bool([]string{"-digests"}, false, "Show digests") + format := cmd.String([]string{"-format"}, "", "Pretty-print images using a Go template") + + flFilter := opts.NewListOpts(nil) + cmd.Var(&flFilter, []string{"f", "-filter"}, "Filter output based on conditions provided") + cmd.Require(flag.Max, 1) + + cmd.ParseFlags(args, true) + + // Consolidate all filter flags, and sanity check them early. + // They'll get processed in the daemon/server.
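+ // For example (illustrative only): `docker images --filter dangling=true --filter label=maintainer=someone` arrives here as two raw "key=value" strings that filters.ParseFlag folds into a single filters.Args set.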
+ imageFilterArgs := filters.NewArgs() + for _, f := range flFilter.GetAll() { + var err error + imageFilterArgs, err = filters.ParseFlag(f, imageFilterArgs) + if err != nil { + return err + } + } + + var matchName string + if cmd.NArg() == 1 { + matchName = cmd.Arg(0) + } + + options := types.ImageListOptions{ + MatchName: matchName, + All: *all, + Filters: imageFilterArgs, + } + + images, err := cli.client.ImageList(context.Background(), options) + if err != nil { + return err + } + + f := *format + if len(f) == 0 { + if len(cli.ImagesFormat()) > 0 && !*quiet { + f = cli.ImagesFormat() + } else { + f = "table" + } + } + + imagesCtx := formatter.ImageContext{ + Context: formatter.Context{ + Output: cli.out, + Format: f, + Quiet: *quiet, + Trunc: !*noTrunc, + }, + Digest: *showDigests, + Images: images, + } + + imagesCtx.Write() + + return nil +} diff --git a/vendor/github.com/docker/docker/api/client/import.go b/vendor/github.com/docker/docker/api/client/import.go new file mode 100644 index 00000000..c96e1e97 --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/import.go @@ -0,0 +1,82 @@ +package client + +import ( + "fmt" + "io" + "os" + + "golang.org/x/net/context" + + Cli "github.com/docker/docker/cli" + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/jsonmessage" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/urlutil" + "github.com/docker/docker/reference" + "github.com/docker/engine-api/types" +) + +// CmdImport creates an empty filesystem image, imports the contents of the tarball into the image, and optionally tags the image. +// +// The URL argument is the address of a tarball (.tar, .tar.gz, .tgz, .bzip, .tar.xz, .txz) file or a path to local file relative to docker client. If the URL is '-', then the tar file is read from STDIN. +// +// Usage: docker import [OPTIONS] file|URL|- [REPOSITORY[:TAG]] +func (cli *DockerCli) CmdImport(args ...string) error { + cmd := Cli.Subcmd("import", []string{"file|URL|- [REPOSITORY[:TAG]]"}, Cli.DockerCommands["import"].Description, true) + flChanges := opts.NewListOpts(nil) + cmd.Var(&flChanges, []string{"c", "-change"}, "Apply Dockerfile instruction to the created image") + message := cmd.String([]string{"m", "-message"}, "", "Set commit message for imported image") + cmd.Require(flag.Min, 1) + + cmd.ParseFlags(args, true) + + var ( + in io.Reader + tag string + src = cmd.Arg(0) + srcName = src + repository = cmd.Arg(1) + changes = flChanges.GetAll() + ) + + if cmd.NArg() == 3 { + fmt.Fprintf(cli.err, "[DEPRECATED] The format 'file|URL|- [REPOSITORY [TAG]]' has been deprecated. 
Please use file|URL|- [REPOSITORY[:TAG]]\n") + tag = cmd.Arg(2) + } + + if repository != "" { + //Check if the given image name can be resolved + if _, err := reference.ParseNamed(repository); err != nil { + return err + } + } + + if src == "-" { + in = cli.in + } else if !urlutil.IsURL(src) { + srcName = "-" + file, err := os.Open(src) + if err != nil { + return err + } + defer file.Close() + in = file + } + + options := types.ImageImportOptions{ + Source: in, + SourceName: srcName, + RepositoryName: repository, + Message: *message, + Tag: tag, + Changes: changes, + } + + responseBody, err := cli.client.ImageImport(context.Background(), options) + if err != nil { + return err + } + defer responseBody.Close() + + return jsonmessage.DisplayJSONMessagesStream(responseBody, cli.out, cli.outFd, cli.isTerminalOut, nil) +} diff --git a/vendor/github.com/docker/docker/api/client/info.go b/vendor/github.com/docker/docker/api/client/info.go new file mode 100644 index 00000000..29df605e --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/info.go @@ -0,0 +1,155 @@ +package client + +import ( + "fmt" + "strings" + + "golang.org/x/net/context" + + Cli "github.com/docker/docker/cli" + "github.com/docker/docker/pkg/ioutils" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/utils" + "github.com/docker/go-units" +) + +// CmdInfo displays system-wide information. +// +// Usage: docker info +func (cli *DockerCli) CmdInfo(args ...string) error { + cmd := Cli.Subcmd("info", nil, Cli.DockerCommands["info"].Description, true) + cmd.Require(flag.Exact, 0) + + cmd.ParseFlags(args, true) + + info, err := cli.client.Info(context.Background()) + if err != nil { + return err + } + + fmt.Fprintf(cli.out, "Containers: %d\n", info.Containers) + fmt.Fprintf(cli.out, " Running: %d\n", info.ContainersRunning) + fmt.Fprintf(cli.out, " Paused: %d\n", info.ContainersPaused) + fmt.Fprintf(cli.out, " Stopped: %d\n", info.ContainersStopped) + fmt.Fprintf(cli.out, "Images: %d\n", info.Images) + ioutils.FprintfIfNotEmpty(cli.out, "Server Version: %s\n", info.ServerVersion) + ioutils.FprintfIfNotEmpty(cli.out, "Storage Driver: %s\n", info.Driver) + if info.DriverStatus != nil { + for _, pair := range info.DriverStatus { + fmt.Fprintf(cli.out, " %s: %s\n", pair[0], pair[1]) + + // print a warning if devicemapper is using a loopback file + if pair[0] == "Data loop file" { + fmt.Fprintln(cli.err, " WARNING: Usage of loopback devices is strongly discouraged for production use. 
Either use `--storage-opt dm.thinpooldev` or use `--storage-opt dm.no_warn_on_loop_devices=true` to suppress this warning.") + } + } + + } + if info.SystemStatus != nil { + for _, pair := range info.SystemStatus { + fmt.Fprintf(cli.out, "%s: %s\n", pair[0], pair[1]) + } + } + ioutils.FprintfIfNotEmpty(cli.out, "Execution Driver: %s\n", info.ExecutionDriver) + ioutils.FprintfIfNotEmpty(cli.out, "Logging Driver: %s\n", info.LoggingDriver) + ioutils.FprintfIfNotEmpty(cli.out, "Cgroup Driver: %s\n", info.CgroupDriver) + + fmt.Fprintf(cli.out, "Plugins: \n") + fmt.Fprintf(cli.out, " Volume:") + fmt.Fprintf(cli.out, " %s", strings.Join(info.Plugins.Volume, " ")) + fmt.Fprintf(cli.out, "\n") + fmt.Fprintf(cli.out, " Network:") + fmt.Fprintf(cli.out, " %s", strings.Join(info.Plugins.Network, " ")) + fmt.Fprintf(cli.out, "\n") + + if len(info.Plugins.Authorization) != 0 { + fmt.Fprintf(cli.out, " Authorization:") + fmt.Fprintf(cli.out, " %s", strings.Join(info.Plugins.Authorization, " ")) + fmt.Fprintf(cli.out, "\n") + } + + ioutils.FprintfIfNotEmpty(cli.out, "Kernel Version: %s\n", info.KernelVersion) + ioutils.FprintfIfNotEmpty(cli.out, "Operating System: %s\n", info.OperatingSystem) + ioutils.FprintfIfNotEmpty(cli.out, "OSType: %s\n", info.OSType) + ioutils.FprintfIfNotEmpty(cli.out, "Architecture: %s\n", info.Architecture) + fmt.Fprintf(cli.out, "CPUs: %d\n", info.NCPU) + fmt.Fprintf(cli.out, "Total Memory: %s\n", units.BytesSize(float64(info.MemTotal))) + ioutils.FprintfIfNotEmpty(cli.out, "Name: %s\n", info.Name) + ioutils.FprintfIfNotEmpty(cli.out, "ID: %s\n", info.ID) + fmt.Fprintf(cli.out, "Docker Root Dir: %s\n", info.DockerRootDir) + fmt.Fprintf(cli.out, "Debug mode (client): %v\n", utils.IsDebugEnabled()) + fmt.Fprintf(cli.out, "Debug mode (server): %v\n", info.Debug) + + if info.Debug { + fmt.Fprintf(cli.out, " File Descriptors: %d\n", info.NFd) + fmt.Fprintf(cli.out, " Goroutines: %d\n", info.NGoroutines) + fmt.Fprintf(cli.out, " System Time: %s\n", info.SystemTime) + fmt.Fprintf(cli.out, " EventsListeners: %d\n", info.NEventsListener) + } + + ioutils.FprintfIfNotEmpty(cli.out, "Http Proxy: %s\n", info.HTTPProxy) + ioutils.FprintfIfNotEmpty(cli.out, "Https Proxy: %s\n", info.HTTPSProxy) + ioutils.FprintfIfNotEmpty(cli.out, "No Proxy: %s\n", info.NoProxy) + + if info.IndexServerAddress != "" { + u := cli.configFile.AuthConfigs[info.IndexServerAddress].Username + if len(u) > 0 { + fmt.Fprintf(cli.out, "Username: %v\n", u) + } + fmt.Fprintf(cli.out, "Registry: %v\n", info.IndexServerAddress) + } + + // Only output these warnings if the server does not support these features + if info.OSType != "windows" { + if !info.MemoryLimit { + fmt.Fprintln(cli.err, "WARNING: No memory limit support") + } + if !info.SwapLimit { + fmt.Fprintln(cli.err, "WARNING: No swap limit support") + } + if !info.KernelMemory { + fmt.Fprintln(cli.err, "WARNING: No kernel memory limit support") + } + if !info.OomKillDisable { + fmt.Fprintln(cli.err, "WARNING: No oom kill disable support") + } + if !info.CPUCfsQuota { + fmt.Fprintln(cli.err, "WARNING: No cpu cfs quota support") + } + if !info.CPUCfsPeriod { + fmt.Fprintln(cli.err, "WARNING: No cpu cfs period support") + } + if !info.CPUShares { + fmt.Fprintln(cli.err, "WARNING: No cpu shares support") + } + if !info.CPUSet { + fmt.Fprintln(cli.err, "WARNING: No cpuset support") + } + if !info.IPv4Forwarding { + fmt.Fprintln(cli.err, "WARNING: IPv4 forwarding is disabled") + } + if !info.BridgeNfIptables { + fmt.Fprintln(cli.err, "WARNING: bridge-nf-call-iptables is 
disabled") + } + if !info.BridgeNfIP6tables { + fmt.Fprintln(cli.err, "WARNING: bridge-nf-call-ip6tables is disabled") + } + } + + if info.Labels != nil { + fmt.Fprintln(cli.out, "Labels:") + for _, attribute := range info.Labels { + fmt.Fprintf(cli.out, " %s\n", attribute) + } + } + + ioutils.FprintfIfTrue(cli.out, "Experimental: %v\n", info.ExperimentalBuild) + if info.ClusterStore != "" { + fmt.Fprintf(cli.out, "Cluster store: %s\n", info.ClusterStore) + } + + if info.ClusterAdvertise != "" { + fmt.Fprintf(cli.out, "Cluster advertise: %s\n", info.ClusterAdvertise) + } + return nil +} diff --git a/vendor/github.com/docker/docker/api/client/inspect.go b/vendor/github.com/docker/docker/api/client/inspect.go new file mode 100644 index 00000000..2e97a5aa --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/inspect.go @@ -0,0 +1,127 @@ +package client + +import ( + "fmt" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/client/inspect" + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/utils/templates" + "github.com/docker/engine-api/client" +) + +// CmdInspect displays low-level information on one or more containers or images. +// +// Usage: docker inspect [OPTIONS] CONTAINER|IMAGE [CONTAINER|IMAGE...] +func (cli *DockerCli) CmdInspect(args ...string) error { + cmd := Cli.Subcmd("inspect", []string{"CONTAINER|IMAGE [CONTAINER|IMAGE...]"}, Cli.DockerCommands["inspect"].Description, true) + tmplStr := cmd.String([]string{"f", "-format"}, "", "Format the output using the given go template") + inspectType := cmd.String([]string{"-type"}, "", "Return JSON for specified type, (e.g image or container)") + size := cmd.Bool([]string{"s", "-size"}, false, "Display total file sizes if the type is container") + cmd.Require(flag.Min, 1) + + cmd.ParseFlags(args, true) + + if *inspectType != "" && *inspectType != "container" && *inspectType != "image" { + return fmt.Errorf("%q is not a valid value for --type", *inspectType) + } + + var elementSearcher inspectSearcher + switch *inspectType { + case "container": + elementSearcher = cli.inspectContainers(*size) + case "image": + elementSearcher = cli.inspectImages(*size) + default: + elementSearcher = cli.inspectAll(*size) + } + + return cli.inspectElements(*tmplStr, cmd.Args(), elementSearcher) +} + +func (cli *DockerCli) inspectContainers(getSize bool) inspectSearcher { + return func(ref string) (interface{}, []byte, error) { + return cli.client.ContainerInspectWithRaw(context.Background(), ref, getSize) + } +} + +func (cli *DockerCli) inspectImages(getSize bool) inspectSearcher { + return func(ref string) (interface{}, []byte, error) { + return cli.client.ImageInspectWithRaw(context.Background(), ref, getSize) + } +} + +func (cli *DockerCli) inspectAll(getSize bool) inspectSearcher { + return func(ref string) (interface{}, []byte, error) { + c, rawContainer, err := cli.client.ContainerInspectWithRaw(context.Background(), ref, getSize) + if err != nil { + // Search for image with that id if a container doesn't exist. 
+ if client.IsErrContainerNotFound(err) { + i, rawImage, err := cli.client.ImageInspectWithRaw(context.Background(), ref, getSize) + if err != nil { + if client.IsErrImageNotFound(err) { + return nil, nil, fmt.Errorf("Error: No such image or container: %s", ref) + } + return nil, nil, err + } + return i, rawImage, err + } + return nil, nil, err + } + return c, rawContainer, err + } +} + +type inspectSearcher func(ref string) (interface{}, []byte, error) + +func (cli *DockerCli) inspectElements(tmplStr string, references []string, searchByReference inspectSearcher) error { + elementInspector, err := cli.newInspectorWithTemplate(tmplStr) + if err != nil { + return Cli.StatusError{StatusCode: 64, Status: err.Error()} + } + + var inspectErr error + for _, ref := range references { + element, raw, err := searchByReference(ref) + if err != nil { + inspectErr = err + break + } + + if err := elementInspector.Inspect(element, raw); err != nil { + inspectErr = err + break + } + } + + if err := elementInspector.Flush(); err != nil { + cli.inspectErrorStatus(err) + } + + if status := cli.inspectErrorStatus(inspectErr); status != 0 { + return Cli.StatusError{StatusCode: status} + } + return nil +} + +func (cli *DockerCli) inspectErrorStatus(err error) (status int) { + if err != nil { + fmt.Fprintf(cli.err, "%s\n", err) + status = 1 + } + return +} + +func (cli *DockerCli) newInspectorWithTemplate(tmplStr string) (inspect.Inspector, error) { + elementInspector := inspect.NewIndentedInspector(cli.out) + if tmplStr != "" { + tmpl, err := templates.Parse(tmplStr) + if err != nil { + return nil, fmt.Errorf("Template parsing error: %s", err) + } + elementInspector = inspect.NewTemplateInspector(cli.out, tmpl) + } + return elementInspector, nil +} diff --git a/vendor/github.com/docker/docker/api/client/inspect/inspector.go b/vendor/github.com/docker/docker/api/client/inspect/inspector.go new file mode 100644 index 00000000..a1d16d47 --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/inspect/inspector.go @@ -0,0 +1,119 @@ +package inspect + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "text/template" +) + +// Inspector defines an interface for processing elements. +type Inspector interface { + Inspect(typedElement interface{}, rawElement []byte) error + Flush() error +} + +// TemplateInspector uses a text template to inspect elements. +type TemplateInspector struct { + outputStream io.Writer + buffer *bytes.Buffer + tmpl *template.Template +} + +// NewTemplateInspector creates a new inspector with a template. +func NewTemplateInspector(outputStream io.Writer, tmpl *template.Template) Inspector { + return &TemplateInspector{ + outputStream: outputStream, + buffer: new(bytes.Buffer), + tmpl: tmpl, + } +} + +// Inspect executes the inspect template. +// It decodes the raw element into a map if the initial execution fails. +// This allows docker cli to parse inspect structs injected with Swarm fields. +func (i *TemplateInspector) Inspect(typedElement interface{}, rawElement []byte) error { + buffer := new(bytes.Buffer) + if err := i.tmpl.Execute(buffer, typedElement); err != nil { + if rawElement == nil { + return fmt.Errorf("Template parsing error: %v", err) + } + return i.tryRawInspectFallback(rawElement, err) + } + i.buffer.Write(buffer.Bytes()) + i.buffer.WriteByte('\n') + return nil +} + +// Flush writes the result of inspecting all elements into the output stream.
+func (i *TemplateInspector) Flush() error { + if i.buffer.Len() == 0 { + _, err := io.WriteString(i.outputStream, "\n") + return err + } + _, err := io.Copy(i.outputStream, i.buffer) + return err +} + +// IndentedInspector uses a buffer to store the indented representation of an element. +type IndentedInspector struct { + outputStream io.Writer + elements []interface{} + rawElements [][]byte +} + +// NewIndentedInspector generates a new IndentedInspector. +func NewIndentedInspector(outputStream io.Writer) Inspector { + return &IndentedInspector{ + outputStream: outputStream, + } +} + +// Inspect writes the raw element with an indented json format. +func (i *IndentedInspector) Inspect(typedElement interface{}, rawElement []byte) error { + if rawElement != nil { + i.rawElements = append(i.rawElements, rawElement) + } else { + i.elements = append(i.elements, typedElement) + } + return nil +} + +// Flush writes the result of inspecting all elements into the output stream. +func (i *IndentedInspector) Flush() error { + if len(i.elements) == 0 && len(i.rawElements) == 0 { + _, err := io.WriteString(i.outputStream, "[]\n") + return err + } + + var buffer io.Reader + if len(i.rawElements) > 0 { + bytesBuffer := new(bytes.Buffer) + bytesBuffer.WriteString("[") + for idx, r := range i.rawElements { + bytesBuffer.Write(r) + if idx < len(i.rawElements)-1 { + bytesBuffer.WriteString(",") + } + } + bytesBuffer.WriteString("]") + indented := new(bytes.Buffer) + if err := json.Indent(indented, bytesBuffer.Bytes(), "", " "); err != nil { + return err + } + buffer = indented + } else { + b, err := json.MarshalIndent(i.elements, "", " ") + if err != nil { + return err + } + buffer = bytes.NewReader(b) + } + + if _, err := io.Copy(i.outputStream, buffer); err != nil { + return err + } + _, err := io.WriteString(i.outputStream, "\n") + return err +} diff --git a/vendor/github.com/docker/docker/api/client/inspect/inspector_go14.go b/vendor/github.com/docker/docker/api/client/inspect/inspector_go14.go new file mode 100644 index 00000000..39a0510c --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/inspect/inspector_go14.go @@ -0,0 +1,40 @@ +// +build !go1.5 + +package inspect + +import ( + "bytes" + "encoding/json" + "fmt" + "strings" +) + +// tryRawInspectFallback executes the inspect template with a raw interface. +// This allows docker cli to parse inspect structs injected with Swarm fields. +// Unfortunately, go 1.4 doesn't fail executing invalid templates when the input is an interface. +// It doesn't allow modifying this behavior either, sending messages to the output. +// We assume that the template is invalid when there is a <no value>, if the template was valid +// we'd get <no value> or "" values. In that case we fail with the original error raised executing the +// template with the typed input.
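+// +// For example (illustrative only): a Swarm-injected field such as {{.Swarm.NodeID}} fails against the typed struct but succeeds against the raw JSON map and is printed, whereas a template referencing a field missing from both renders as <no value> in the raw output, which is what the check below detects.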
+func (i *TemplateInspector) tryRawInspectFallback(rawElement []byte, originalErr error) error { + var raw interface{} + buffer := new(bytes.Buffer) + rdr := bytes.NewReader(rawElement) + dec := json.NewDecoder(rdr) + + if rawErr := dec.Decode(&raw); rawErr != nil { + return fmt.Errorf("unable to read inspect data: %v", rawErr) + } + + if rawErr := i.tmpl.Execute(buffer, raw); rawErr != nil { + return fmt.Errorf("Template parsing error: %v", rawErr) + } + + if strings.Contains(buffer.String(), "<no value>") { + return fmt.Errorf("Template parsing error: %v", originalErr) + } + + i.buffer.Write(buffer.Bytes()) + i.buffer.WriteByte('\n') + return nil +} diff --git a/vendor/github.com/docker/docker/api/client/inspect/inspector_go15.go b/vendor/github.com/docker/docker/api/client/inspect/inspector_go15.go new file mode 100644 index 00000000..b098f415 --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/inspect/inspector_go15.go @@ -0,0 +1,29 @@ +// +build go1.5 + +package inspect + +import ( + "bytes" + "encoding/json" + "fmt" +) + +func (i *TemplateInspector) tryRawInspectFallback(rawElement []byte, _ error) error { + var raw interface{} + buffer := new(bytes.Buffer) + rdr := bytes.NewReader(rawElement) + dec := json.NewDecoder(rdr) + + if rawErr := dec.Decode(&raw); rawErr != nil { + return fmt.Errorf("unable to read inspect data: %v", rawErr) + } + + tmplMissingKey := i.tmpl.Option("missingkey=error") + if rawErr := tmplMissingKey.Execute(buffer, raw); rawErr != nil { + return fmt.Errorf("Template parsing error: %v", rawErr) + } + + i.buffer.Write(buffer.Bytes()) + i.buffer.WriteByte('\n') + return nil +} diff --git a/vendor/github.com/docker/docker/api/client/kill.go b/vendor/github.com/docker/docker/api/client/kill.go new file mode 100644 index 00000000..9841ba4d --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/kill.go @@ -0,0 +1,35 @@ +package client + +import ( + "fmt" + "strings" + + "golang.org/x/net/context" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" +) + +// CmdKill kills one or more running containers using SIGKILL or a specified signal. +// +// Usage: docker kill [OPTIONS] CONTAINER [CONTAINER...] +func (cli *DockerCli) CmdKill(args ...string) error { + cmd := Cli.Subcmd("kill", []string{"CONTAINER [CONTAINER...]"}, Cli.DockerCommands["kill"].Description, true) + signal := cmd.String([]string{"s", "-signal"}, "KILL", "Signal to send to the container") + cmd.Require(flag.Min, 1) + + cmd.ParseFlags(args, true) + + var errs []string + for _, name := range cmd.Args() { + if err := cli.client.ContainerKill(context.Background(), name, *signal); err != nil { + errs = append(errs, err.Error()) + } else { + fmt.Fprintf(cli.out, "%s\n", name) + } + } + if len(errs) > 0 { + return fmt.Errorf("%s", strings.Join(errs, "\n")) + } + return nil +} diff --git a/vendor/github.com/docker/docker/api/client/load.go b/vendor/github.com/docker/docker/api/client/load.go new file mode 100644 index 00000000..820fdc0e --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/load.go @@ -0,0 +1,50 @@ +package client + +import ( + "io" + "os" + + "golang.org/x/net/context" + + Cli "github.com/docker/docker/cli" + "github.com/docker/docker/pkg/jsonmessage" + flag "github.com/docker/docker/pkg/mflag" +) + +// CmdLoad loads an image from a tar archive. +// +// The tar archive is read from STDIN by default, or from a tar archive file.
+// +// Usage: docker load [OPTIONS] +func (cli *DockerCli) CmdLoad(args ...string) error { + cmd := Cli.Subcmd("load", nil, Cli.DockerCommands["load"].Description, true) + infile := cmd.String([]string{"i", "-input"}, "", "Read from a tar archive file, instead of STDIN") + quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Suppress the load output") + cmd.Require(flag.Exact, 0) + cmd.ParseFlags(args, true) + + var input io.Reader = cli.in + if *infile != "" { + file, err := os.Open(*infile) + if err != nil { + return err + } + defer file.Close() + input = file + } + if !cli.isTerminalOut { + *quiet = true + } + response, err := cli.client.ImageLoad(context.Background(), input, *quiet) + if err != nil { + return err + } + defer response.Body.Close() + + if response.Body != nil && response.JSON { + return jsonmessage.DisplayJSONMessagesStream(response.Body, cli.out, cli.outFd, cli.isTerminalOut, nil) + } + + _, err = io.Copy(cli.out, response.Body) + return err +} diff --git a/vendor/github.com/docker/docker/api/client/login.go b/vendor/github.com/docker/docker/api/client/login.go new file mode 100644 index 00000000..a772348c --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/login.go @@ -0,0 +1,177 @@ +package client + +import ( + "bufio" + "fmt" + "io" + "os" + "runtime" + "strings" + + "golang.org/x/net/context" + + Cli "github.com/docker/docker/cli" + "github.com/docker/docker/cliconfig" + "github.com/docker/docker/cliconfig/credentials" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/term" + "github.com/docker/engine-api/types" +) + +// CmdLogin logs in a user to a Docker registry service. +// +// If no server is specified, the user will be logged into or registered to the registry's index server. +// +// Usage: docker login SERVER +func (cli *DockerCli) CmdLogin(args ...string) error { + cmd := Cli.Subcmd("login", []string{"[SERVER]"}, Cli.DockerCommands["login"].Description+".\nIf no server is specified, the default is defined by the daemon.", true) + cmd.Require(flag.Max, 1) + + flUser := cmd.String([]string{"u", "-username"}, "", "Username") + flPassword := cmd.String([]string{"p", "-password"}, "", "Password") + + // Deprecated in 1.11: Should be removed in docker 1.13 + cmd.String([]string{"#e", "#-email"}, "", "Email") + + cmd.ParseFlags(args, true) + + // On Windows, force the use of the regular OS stdin stream. 
Fixes #14336/#14210 + if runtime.GOOS == "windows" { + cli.in = os.Stdin + } + + var serverAddress string + var isDefaultRegistry bool + if len(cmd.Args()) > 0 { + serverAddress = cmd.Arg(0) + } else { + serverAddress = cli.electAuthServer() + isDefaultRegistry = true + } + + authConfig, err := cli.configureAuth(*flUser, *flPassword, serverAddress, isDefaultRegistry) + if err != nil { + return err + } + + response, err := cli.client.RegistryLogin(context.Background(), authConfig) + if err != nil { + return err + } + + if response.IdentityToken != "" { + authConfig.Password = "" + authConfig.IdentityToken = response.IdentityToken + } + if err := storeCredentials(cli.configFile, authConfig); err != nil { + return fmt.Errorf("Error saving credentials: %v", err) + } + + if response.Status != "" { + fmt.Fprintln(cli.out, response.Status) + } + return nil +} + +func (cli *DockerCli) promptWithDefault(prompt string, configDefault string) { + if configDefault == "" { + fmt.Fprintf(cli.out, "%s: ", prompt) + } else { + fmt.Fprintf(cli.out, "%s (%s): ", prompt, configDefault) + } +} + +func (cli *DockerCli) configureAuth(flUser, flPassword, serverAddress string, isDefaultRegistry bool) (types.AuthConfig, error) { + authconfig, err := getCredentials(cli.configFile, serverAddress) + if err != nil { + return authconfig, err + } + + authconfig.Username = strings.TrimSpace(authconfig.Username) + + if flUser = strings.TrimSpace(flUser); flUser == "" { + if isDefaultRegistry { + // if this is the default registry (Docker Hub), then display the following message. + fmt.Fprintln(cli.out, "Login with your Docker ID to push and pull images from Docker Hub. If you don't have a Docker ID, head over to https://hub.docker.com to create one.") + } + cli.promptWithDefault("Username", authconfig.Username) + flUser = readInput(cli.in, cli.out) + flUser = strings.TrimSpace(flUser) + if flUser == "" { + flUser = authconfig.Username + } + } + + if flUser == "" { + return authconfig, fmt.Errorf("Error: Non-null Username Required") + } + + if flPassword == "" { + oldState, err := term.SaveState(cli.inFd) + if err != nil { + return authconfig, err + } + fmt.Fprintf(cli.out, "Password: ") + term.DisableEcho(cli.inFd, oldState) + + flPassword = readInput(cli.in, cli.out) + fmt.Fprint(cli.out, "\n") + + term.RestoreTerminal(cli.inFd, oldState) + if flPassword == "" { + return authconfig, fmt.Errorf("Error: Password Required") + } + } + + authconfig.Username = flUser + authconfig.Password = flPassword + authconfig.ServerAddress = serverAddress + authconfig.IdentityToken = "" + + return authconfig, nil +} + +func readInput(in io.Reader, out io.Writer) string { + reader := bufio.NewReader(in) + line, _, err := reader.ReadLine() + if err != nil { + fmt.Fprintln(out, err.Error()) + os.Exit(1) + } + return string(line) +} + +// getCredentials loads the user credentials from a credentials store. +// The store is determined by the config file settings. +func getCredentials(c *cliconfig.ConfigFile, serverAddress string) (types.AuthConfig, error) { + s := loadCredentialsStore(c) + return s.Get(serverAddress) +} + +func getAllCredentials(c *cliconfig.ConfigFile) (map[string]types.AuthConfig, error) { + s := loadCredentialsStore(c) + return s.GetAll() +} + +// storeCredentials saves the user credentials in a credentials store. +// The store is determined by the config file settings.
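+// +// For example (illustrative only), a ~/.docker/config.json containing {"credsStore": "osxkeychain"} routes Store/Get/Erase through the external docker-credential-osxkeychain helper instead of writing auth entries into the config file itself.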
+func storeCredentials(c *cliconfig.ConfigFile, auth types.AuthConfig) error { + s := loadCredentialsStore(c) + return s.Store(auth) +} + +// eraseCredentials removes the user credentials from a credentials store. +// The store is determined by the config file settings. +func eraseCredentials(c *cliconfig.ConfigFile, serverAddress string) error { + s := loadCredentialsStore(c) + return s.Erase(serverAddress) +} + +// loadCredentialsStore initializes a new credentials store based +// on the settings provided in the configuration file. +func loadCredentialsStore(c *cliconfig.ConfigFile) credentials.Store { + if c.CredentialsStore != "" { + return credentials.NewNativeStore(c) + } + return credentials.NewFileStore(c) +} diff --git a/vendor/github.com/docker/docker/api/client/logout.go b/vendor/github.com/docker/docker/api/client/logout.go new file mode 100644 index 00000000..b5ff59dd --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/logout.go @@ -0,0 +1,41 @@ +package client + +import ( + "fmt" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" +) + +// CmdLogout logs a user out from a Docker registry. +// +// If no server is specified, the user will be logged out from the registry's index server. +// +// Usage: docker logout [SERVER] +func (cli *DockerCli) CmdLogout(args ...string) error { + cmd := Cli.Subcmd("logout", []string{"[SERVER]"}, Cli.DockerCommands["logout"].Description+".\nIf no server is specified, the default is defined by the daemon.", true) + cmd.Require(flag.Max, 1) + + cmd.ParseFlags(args, true) + + var serverAddress string + if len(cmd.Args()) > 0 { + serverAddress = cmd.Arg(0) + } else { + serverAddress = cli.electAuthServer() + } + + // check if we're logged in based on the records in the config file, + // which means it couldn't have user/pass because they may be in the creds store + if _, ok := cli.configFile.AuthConfigs[serverAddress]; !ok { + fmt.Fprintf(cli.out, "Not logged in to %s\n", serverAddress) + return nil + } + + fmt.Fprintf(cli.out, "Remove login credentials for %s\n", serverAddress) + if err := eraseCredentials(cli.configFile, serverAddress); err != nil { + fmt.Fprintf(cli.out, "WARNING: could not erase credentials: %v\n", err) + } + + return nil +} diff --git a/vendor/github.com/docker/docker/api/client/logs.go b/vendor/github.com/docker/docker/api/client/logs.go new file mode 100644 index 00000000..7cd5605e --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/logs.go @@ -0,0 +1,65 @@ +package client + +import ( + "fmt" + "io" + + "golang.org/x/net/context" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/stdcopy" + "github.com/docker/engine-api/types" +) + +var validDrivers = map[string]bool{ + "json-file": true, + "journald": true, +} + +// CmdLogs fetches the logs of a given container.
+// +// docker logs [OPTIONS] CONTAINER +func (cli *DockerCli) CmdLogs(args ...string) error { + cmd := Cli.Subcmd("logs", []string{"CONTAINER"}, Cli.DockerCommands["logs"].Description, true) + follow := cmd.Bool([]string{"f", "-follow"}, false, "Follow log output") + since := cmd.String([]string{"-since"}, "", "Show logs since timestamp") + times := cmd.Bool([]string{"t", "-timestamps"}, false, "Show timestamps") + tail := cmd.String([]string{"-tail"}, "all", "Number of lines to show from the end of the logs") + cmd.Require(flag.Exact, 1) + + cmd.ParseFlags(args, true) + + name := cmd.Arg(0) + + c, err := cli.client.ContainerInspect(context.Background(), name) + if err != nil { + return err + } + + if !validDrivers[c.HostConfig.LogConfig.Type] { + return fmt.Errorf("\"logs\" command is supported only for \"json-file\" and \"journald\" logging drivers (got: %s)", c.HostConfig.LogConfig.Type) + } + + options := types.ContainerLogsOptions{ + ContainerID: name, + ShowStdout: true, + ShowStderr: true, + Since: *since, + Timestamps: *times, + Follow: *follow, + Tail: *tail, + } + responseBody, err := cli.client.ContainerLogs(context.Background(), options) + if err != nil { + return err + } + defer responseBody.Close() + + if c.Config.Tty { + _, err = io.Copy(cli.out, responseBody) + } else { + _, err = stdcopy.StdCopy(cli.out, cli.err, responseBody) + } + return err +} diff --git a/vendor/github.com/docker/docker/api/client/network.go b/vendor/github.com/docker/docker/api/client/network.go new file mode 100644 index 00000000..4bbc7154 --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/network.go @@ -0,0 +1,392 @@ +package client + +import ( + "fmt" + "net" + "sort" + "strings" + "text/tabwriter" + + "golang.org/x/net/context" + + Cli "github.com/docker/docker/cli" + "github.com/docker/docker/opts" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/stringid" + runconfigopts "github.com/docker/docker/runconfig/opts" + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/filters" + "github.com/docker/engine-api/types/network" +) + +// CmdNetwork is the parent subcommand for all network commands +// +// Usage: docker network [OPTIONS] +func (cli *DockerCli) CmdNetwork(args ...string) error { + cmd := Cli.Subcmd("network", []string{"COMMAND [OPTIONS]"}, networkUsage(), false) + cmd.Require(flag.Min, 1) + err := cmd.ParseFlags(args, true) + cmd.Usage() + return err +} + +// CmdNetworkCreate creates a new network with a given name +// +// Usage: docker network create [OPTIONS] +func (cli *DockerCli) CmdNetworkCreate(args ...string) error { + cmd := Cli.Subcmd("network create", []string{"NETWORK-NAME"}, "Creates a new network with a name specified by the user", false) + flDriver := cmd.String([]string{"d", "-driver"}, "bridge", "Driver to manage the Network") + flOpts := opts.NewMapOpts(nil, nil) + + flIpamDriver := cmd.String([]string{"-ipam-driver"}, "default", "IP Address Management Driver") + flIpamSubnet := opts.NewListOpts(nil) + flIpamIPRange := opts.NewListOpts(nil) + flIpamGateway := opts.NewListOpts(nil) + flIpamAux := opts.NewMapOpts(nil, nil) + flIpamOpt := opts.NewMapOpts(nil, nil) + flLabels := opts.NewListOpts(nil) + + cmd.Var(&flIpamSubnet, []string{"-subnet"}, "subnet in CIDR format that represents a network segment") + cmd.Var(&flIpamIPRange, []string{"-ip-range"}, "allocate container ip from a sub-range") + cmd.Var(&flIpamGateway, []string{"-gateway"}, "ipv4 or ipv6 Gateway for the master subnet") + cmd.Var(flIpamAux, 
[]string{"-aux-address"}, "auxiliary ipv4 or ipv6 addresses used by Network driver") + cmd.Var(flOpts, []string{"o", "-opt"}, "set driver specific options") + cmd.Var(flIpamOpt, []string{"-ipam-opt"}, "set IPAM driver specific options") + cmd.Var(&flLabels, []string{"-label"}, "set metadata on a network") + + flInternal := cmd.Bool([]string{"-internal"}, false, "restricts external access to the network") + flIPv6 := cmd.Bool([]string{"-ipv6"}, false, "enable IPv6 networking") + + cmd.Require(flag.Exact, 1) + err := cmd.ParseFlags(args, true) + if err != nil { + return err + } + + // Set the default driver to "" if the user didn't set the value. + // That way we can know whether it was user input or not. + driver := *flDriver + if !cmd.IsSet("-driver") && !cmd.IsSet("d") { + driver = "" + } + + ipamCfg, err := consolidateIpam(flIpamSubnet.GetAll(), flIpamIPRange.GetAll(), flIpamGateway.GetAll(), flIpamAux.GetAll()) + if err != nil { + return err + } + + // Construct network create request body + nc := types.NetworkCreate{ + Name: cmd.Arg(0), + Driver: driver, + IPAM: network.IPAM{Driver: *flIpamDriver, Config: ipamCfg, Options: flIpamOpt.GetAll()}, + Options: flOpts.GetAll(), + CheckDuplicate: true, + Internal: *flInternal, + EnableIPv6: *flIPv6, + Labels: runconfigopts.ConvertKVStringsToMap(flLabels.GetAll()), + } + + resp, err := cli.client.NetworkCreate(context.Background(), nc) + if err != nil { + return err + } + fmt.Fprintf(cli.out, "%s\n", resp.ID) + return nil +} + +// CmdNetworkRm deletes one or more networks +// +// Usage: docker network rm NETWORK-NAME|NETWORK-ID [NETWORK-NAME|NETWORK-ID...] +func (cli *DockerCli) CmdNetworkRm(args ...string) error { + cmd := Cli.Subcmd("network rm", []string{"NETWORK [NETWORK...]"}, "Deletes one or more networks", false) + cmd.Require(flag.Min, 1) + if err := cmd.ParseFlags(args, true); err != nil { + return err + } + + status := 0 + for _, net := range cmd.Args() { + if err := cli.client.NetworkRemove(context.Background(), net); err != nil { + fmt.Fprintf(cli.err, "%s\n", err) + status = 1 + continue + } + } + if status != 0 { + return Cli.StatusError{StatusCode: status} + } + return nil +} + +// CmdNetworkConnect connects a container to a network +// +// Usage: docker network connect [OPTIONS] +func (cli *DockerCli) CmdNetworkConnect(args ...string) error { + cmd := Cli.Subcmd("network connect", []string{"NETWORK CONTAINER"}, "Connects a container to a network", false) + flIPAddress := cmd.String([]string{"-ip"}, "", "IP Address") + flIPv6Address := cmd.String([]string{"-ip6"}, "", "IPv6 Address") + flLinks := opts.NewListOpts(runconfigopts.ValidateLink) + cmd.Var(&flLinks, []string{"-link"}, "Add link to another container") + flAliases := opts.NewListOpts(nil) + cmd.Var(&flAliases, []string{"-alias"}, "Add network-scoped alias for the container") + cmd.Require(flag.Min, 2) + if err := cmd.ParseFlags(args, true); err != nil { + return err + } + epConfig := &network.EndpointSettings{ + IPAMConfig: &network.EndpointIPAMConfig{ + IPv4Address: *flIPAddress, + IPv6Address: *flIPv6Address, + }, + Links: flLinks.GetAll(), + Aliases: flAliases.GetAll(), + } + return cli.client.NetworkConnect(context.Background(), cmd.Arg(0), cmd.Arg(1), epConfig) +} + +// CmdNetworkDisconnect disconnects a container from a network +// +// Usage: docker network disconnect +func (cli *DockerCli) CmdNetworkDisconnect(args ...string) error { + cmd := Cli.Subcmd("network disconnect", []string{"NETWORK CONTAINER"}, "Disconnects container from a network", false) + force := 
cmd.Bool([]string{"f", "-force"}, false, "Force the container to disconnect from a network")
+	cmd.Require(flag.Exact, 2)
+	if err := cmd.ParseFlags(args, true); err != nil {
+		return err
+	}
+
+	return cli.client.NetworkDisconnect(context.Background(), cmd.Arg(0), cmd.Arg(1), *force)
+}
+
+// CmdNetworkLs lists all the networks managed by the docker daemon
+//
+// Usage: docker network ls [OPTIONS]
+func (cli *DockerCli) CmdNetworkLs(args ...string) error {
+	cmd := Cli.Subcmd("network ls", nil, "Lists networks", true)
+	quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only display numeric IDs")
+	noTrunc := cmd.Bool([]string{"-no-trunc"}, false, "Do not truncate the output")
+
+	flFilter := opts.NewListOpts(nil)
+	cmd.Var(&flFilter, []string{"f", "-filter"}, "Filter output based on conditions provided")
+
+	cmd.Require(flag.Exact, 0)
+	err := cmd.ParseFlags(args, true)
+	if err != nil {
+		return err
+	}
+
+	// Consolidate all filter flags, and sanity check them early.
+	// They'll get processed after we get the response from the server.
+	netFilterArgs := filters.NewArgs()
+	for _, f := range flFilter.GetAll() {
+		if netFilterArgs, err = filters.ParseFlag(f, netFilterArgs); err != nil {
+			return err
+		}
+	}
+
+	options := types.NetworkListOptions{
+		Filters: netFilterArgs,
+	}
+
+	networkResources, err := cli.client.NetworkList(context.Background(), options)
+	if err != nil {
+		return err
+	}
+
+	wr := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0)
+
+	// unless quiet (-q) is specified, print field titles
+	if !*quiet {
+		fmt.Fprintln(wr, "NETWORK ID\tNAME\tDRIVER")
+	}
+	sort.Sort(byNetworkName(networkResources))
+	for _, networkResource := range networkResources {
+		ID := networkResource.ID
+		netName := networkResource.Name
+		if !*noTrunc {
+			ID = stringid.TruncateID(ID)
+		}
+		if *quiet {
+			fmt.Fprintln(wr, ID)
+			continue
+		}
+		driver := networkResource.Driver
+		fmt.Fprintf(wr, "%s\t%s\t%s\t",
+			ID,
+			netName,
+			driver)
+		fmt.Fprint(wr, "\n")
+	}
+	wr.Flush()
+	return nil
+}
+
+type byNetworkName []types.NetworkResource
+
+func (r byNetworkName) Len() int           { return len(r) }
+func (r byNetworkName) Swap(i, j int)      { r[i], r[j] = r[j], r[i] }
+func (r byNetworkName) Less(i, j int) bool { return r[i].Name < r[j].Name }
+
+// CmdNetworkInspect inspects the network object for more details
+//
+// Usage: docker network inspect [OPTIONS] [NETWORK...]
+func (cli *DockerCli) CmdNetworkInspect(args ...string) error {
+	cmd := Cli.Subcmd("network inspect", []string{"NETWORK [NETWORK...]"}, "Displays detailed information on one or more networks", false)
+	tmplStr := cmd.String([]string{"f", "-format"}, "", "Format the output using the given Go template")
+	cmd.Require(flag.Min, 1)
+
+	if err := cmd.ParseFlags(args, true); err != nil {
+		return err
+	}
+
+	inspectSearcher := func(name string) (interface{}, []byte, error) {
+		i, err := cli.client.NetworkInspect(context.Background(), name)
+		return i, nil, err
+	}
+
+	return cli.inspectElements(*tmplStr, cmd.Args(), inspectSearcher)
+}
+
+// consolidateIpam consolidates the IPAM configuration as a group from the
+// different related flags. A user can configure a network with multiple
+// non-overlapping subnets, which makes it possible to correlate the related
+// parameters (subnets, ip-ranges, gateways and auxiliary addresses) by
+// subnet and consolidate them into structured IPAM data.
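+//
+// For example (illustrative values):
+//   --subnet 10.0.0.0/16 --ip-range 10.0.1.0/24 --gateway 10.0.0.254
+// groups the ip-range and the gateway under the 10.0.0.0/16 subnet and
+// yields a single IPAMConfig entry for it.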
+func consolidateIpam(subnets, ranges, gateways []string, auxaddrs map[string]string) ([]network.IPAMConfig, error) {
+	if len(subnets) < len(ranges) || len(subnets) < len(gateways) {
+		return nil, fmt.Errorf("every ip-range or gateway must have a corresponding subnet")
+	}
+	iData := map[string]*network.IPAMConfig{}
+
+	// Populate non-overlapping subnets into consolidation map
+	for _, s := range subnets {
+		for k := range iData {
+			ok1, err := subnetMatches(s, k)
+			if err != nil {
+				return nil, err
+			}
+			ok2, err := subnetMatches(k, s)
+			if err != nil {
+				return nil, err
+			}
+			if ok1 || ok2 {
+				return nil, fmt.Errorf("multiple overlapping subnet configuration is not supported")
+			}
+		}
+		iData[s] = &network.IPAMConfig{Subnet: s, AuxAddress: map[string]string{}}
+	}
+
+	// Validate and add valid ip ranges
+	for _, r := range ranges {
+		match := false
+		for _, s := range subnets {
+			ok, err := subnetMatches(s, r)
+			if err != nil {
+				return nil, err
+			}
+			if !ok {
+				continue
+			}
+			if iData[s].IPRange != "" {
+				return nil, fmt.Errorf("cannot configure multiple ranges (%s, %s) on the same subnet (%s)", r, iData[s].IPRange, s)
+			}
+			d := iData[s]
+			d.IPRange = r
+			match = true
+		}
+		if !match {
+			return nil, fmt.Errorf("no matching subnet for range %s", r)
+		}
+	}
+
+	// Validate and add valid gateways
+	for _, g := range gateways {
+		match := false
+		for _, s := range subnets {
+			ok, err := subnetMatches(s, g)
+			if err != nil {
+				return nil, err
+			}
+			if !ok {
+				continue
+			}
+			if iData[s].Gateway != "" {
+				return nil, fmt.Errorf("cannot configure multiple gateways (%s, %s) for the same subnet (%s)", g, iData[s].Gateway, s)
+			}
+			d := iData[s]
+			d.Gateway = g
+			match = true
+		}
+		if !match {
+			return nil, fmt.Errorf("no matching subnet for gateway %s", g)
+		}
+	}
+
+	// Validate and add aux-addresses
+	for key, aa := range auxaddrs {
+		match := false
+		for _, s := range subnets {
+			ok, err := subnetMatches(s, aa)
+			if err != nil {
+				return nil, err
+			}
+			if !ok {
+				continue
+			}
+			iData[s].AuxAddress[key] = aa
+			match = true
+		}
+		if !match {
+			return nil, fmt.Errorf("no matching subnet for aux-address %s", aa)
+		}
+	}
+
+	idl := []network.IPAMConfig{}
+	for _, v := range iData {
+		idl = append(idl, *v)
+	}
+	return idl, nil
+}
+
+func subnetMatches(subnet, data string) (bool, error) {
+	var (
+		ip net.IP
+	)
+
+	_, s, err := net.ParseCIDR(subnet)
+	if err != nil {
+		// s is nil on a parse failure, so report the raw input instead
+		return false, fmt.Errorf("Invalid subnet %s : %v", subnet, err)
+	}
+
+	if strings.Contains(data, "/") {
+		ip, _, err = net.ParseCIDR(data)
+		if err != nil {
+			return false, fmt.Errorf("Invalid cidr %s : %v", data, err)
+		}
+	} else {
+		ip = net.ParseIP(data)
+	}
+
+	return s.Contains(ip), nil
+}
+
+func networkUsage() string {
+	networkCommands := map[string]string{
+		"create":     "Create a network",
+		"connect":    "Connect container to a network",
+		"disconnect": "Disconnect container from a network",
+		"inspect":    "Display detailed network information",
+		"ls":         "List all networks",
+		"rm":         "Remove a network",
+	}
+
+	help := "Commands:\n"
+
+	for cmd, description := range networkCommands {
+		help += fmt.Sprintf("  %-25.25s%s\n", cmd, description)
+	}
+
+	help += "\nRun 'docker network COMMAND --help' for more information on a command."
+	return help
+}
diff --git a/vendor/github.com/docker/docker/api/client/pause.go b/vendor/github.com/docker/docker/api/client/pause.go
new file mode 100644
index 00000000..ffba1c9a
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/client/pause.go
@@ -0,0 +1,34 @@
+package client
+
+import
( + "fmt" + "strings" + + "golang.org/x/net/context" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" +) + +// CmdPause pauses all processes within one or more containers. +// +// Usage: docker pause CONTAINER [CONTAINER...] +func (cli *DockerCli) CmdPause(args ...string) error { + cmd := Cli.Subcmd("pause", []string{"CONTAINER [CONTAINER...]"}, Cli.DockerCommands["pause"].Description, true) + cmd.Require(flag.Min, 1) + + cmd.ParseFlags(args, true) + + var errs []string + for _, name := range cmd.Args() { + if err := cli.client.ContainerPause(context.Background(), name); err != nil { + errs = append(errs, err.Error()) + } else { + fmt.Fprintf(cli.out, "%s\n", name) + } + } + if len(errs) > 0 { + return fmt.Errorf("%s", strings.Join(errs, "\n")) + } + return nil +} diff --git a/vendor/github.com/docker/docker/api/client/port.go b/vendor/github.com/docker/docker/api/client/port.go new file mode 100644 index 00000000..9b545f56 --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/port.go @@ -0,0 +1,61 @@ +package client + +import ( + "fmt" + "strings" + + "golang.org/x/net/context" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/go-connections/nat" +) + +// CmdPort lists port mappings for a container. +// If a private port is specified, it also shows the public-facing port that is NATed to the private port. +// +// Usage: docker port CONTAINER [PRIVATE_PORT[/PROTO]] +func (cli *DockerCli) CmdPort(args ...string) error { + cmd := Cli.Subcmd("port", []string{"CONTAINER [PRIVATE_PORT[/PROTO]]"}, Cli.DockerCommands["port"].Description, true) + cmd.Require(flag.Min, 1) + + cmd.ParseFlags(args, true) + + c, err := cli.client.ContainerInspect(context.Background(), cmd.Arg(0)) + if err != nil { + return err + } + + if cmd.NArg() == 2 { + var ( + port = cmd.Arg(1) + proto = "tcp" + parts = strings.SplitN(port, "/", 2) + ) + + if len(parts) == 2 && len(parts[1]) != 0 { + port = parts[0] + proto = parts[1] + } + natPort := port + "/" + proto + newP, err := nat.NewPort(proto, port) + if err != nil { + return err + } + if frontends, exists := c.NetworkSettings.Ports[newP]; exists && frontends != nil { + for _, frontend := range frontends { + fmt.Fprintf(cli.out, "%s:%s\n", frontend.HostIP, frontend.HostPort) + } + return nil + } + return fmt.Errorf("Error: No public port '%s' published for %s", natPort, cmd.Arg(0)) + } + + for from, frontends := range c.NetworkSettings.Ports { + for _, frontend := range frontends { + fmt.Fprintf(cli.out, "%s -> %s:%s\n", from, frontend.HostIP, frontend.HostPort) + } + } + + return nil +} diff --git a/vendor/github.com/docker/docker/api/client/ps.go b/vendor/github.com/docker/docker/api/client/ps.go new file mode 100644 index 00000000..3627ffc1 --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/ps.go @@ -0,0 +1,89 @@ +package client + +import ( + "golang.org/x/net/context" + + "github.com/docker/docker/api/client/formatter" + Cli "github.com/docker/docker/cli" + "github.com/docker/docker/opts" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/filters" +) + +// CmdPs outputs a list of Docker containers. 
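+// Filters passed via --filter are parsed for sanity on the client and then
+// applied by the daemon; rendering is handled by the formatter package,
+// optionally driven by a Go template supplied with --format.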
+// +// Usage: docker ps [OPTIONS] +func (cli *DockerCli) CmdPs(args ...string) error { + var ( + err error + + psFilterArgs = filters.NewArgs() + + cmd = Cli.Subcmd("ps", nil, Cli.DockerCommands["ps"].Description, true) + quiet = cmd.Bool([]string{"q", "-quiet"}, false, "Only display numeric IDs") + size = cmd.Bool([]string{"s", "-size"}, false, "Display total file sizes") + all = cmd.Bool([]string{"a", "-all"}, false, "Show all containers (default shows just running)") + noTrunc = cmd.Bool([]string{"-no-trunc"}, false, "Don't truncate output") + nLatest = cmd.Bool([]string{"l", "-latest"}, false, "Show the latest created container (includes all states)") + since = cmd.String([]string{"#-since"}, "", "Show containers created since Id or Name (includes all states)") + before = cmd.String([]string{"#-before"}, "", "Only show containers created before Id or Name") + last = cmd.Int([]string{"n"}, -1, "Show n last created containers (includes all states)") + format = cmd.String([]string{"-format"}, "", "Pretty-print containers using a Go template") + flFilter = opts.NewListOpts(nil) + ) + cmd.Require(flag.Exact, 0) + + cmd.Var(&flFilter, []string{"f", "-filter"}, "Filter output based on conditions provided") + + cmd.ParseFlags(args, true) + if *last == -1 && *nLatest { + *last = 1 + } + + // Consolidate all filter flags, and sanity check them. + // They'll get processed in the daemon/server. + for _, f := range flFilter.GetAll() { + if psFilterArgs, err = filters.ParseFlag(f, psFilterArgs); err != nil { + return err + } + } + + options := types.ContainerListOptions{ + All: *all, + Limit: *last, + Since: *since, + Before: *before, + Size: *size, + Filter: psFilterArgs, + } + + containers, err := cli.client.ContainerList(context.Background(), options) + if err != nil { + return err + } + + f := *format + if len(f) == 0 { + if len(cli.PsFormat()) > 0 && !*quiet { + f = cli.PsFormat() + } else { + f = "table" + } + } + + psCtx := formatter.ContainerContext{ + Context: formatter.Context{ + Output: cli.out, + Format: f, + Quiet: *quiet, + Trunc: !*noTrunc, + }, + Size: *size, + Containers: containers, + } + + psCtx.Write() + + return nil +} diff --git a/vendor/github.com/docker/docker/api/client/pull.go b/vendor/github.com/docker/docker/api/client/pull.go new file mode 100644 index 00000000..eb79d38b --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/pull.go @@ -0,0 +1,73 @@ +package client + +import ( + "errors" + "fmt" + + "golang.org/x/net/context" + + Cli "github.com/docker/docker/cli" + "github.com/docker/docker/pkg/jsonmessage" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "github.com/docker/engine-api/client" + "github.com/docker/engine-api/types" +) + +// CmdPull pulls an image or a repository from the registry. 
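+// A reference without a tag or digest is completed with the default tag,
+// unless --all-tags is set, in which case every tagged image in the
+// repository is pulled.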
+// +// Usage: docker pull [OPTIONS] IMAGENAME[:TAG|@DIGEST] +func (cli *DockerCli) CmdPull(args ...string) error { + cmd := Cli.Subcmd("pull", []string{"NAME[:TAG|@DIGEST]"}, Cli.DockerCommands["pull"].Description, true) + allTags := cmd.Bool([]string{"a", "-all-tags"}, false, "Download all tagged images in the repository") + cmd.Require(flag.Exact, 1) + + cmd.ParseFlags(args, true) + remote := cmd.Arg(0) + + distributionRef, err := reference.ParseNamed(remote) + if err != nil { + return err + } + if *allTags && !reference.IsNameOnly(distributionRef) { + return errors.New("tag can't be used with --all-tags/-a") + } + + if !*allTags && reference.IsNameOnly(distributionRef) { + distributionRef = reference.WithDefaultTag(distributionRef) + fmt.Fprintf(cli.out, "Using default tag: %s\n", reference.DefaultTag) + } + + // Resolve the Repository name from fqn to RepositoryInfo + repoInfo, err := registry.ParseRepositoryInfo(distributionRef) + if err != nil { + return err + } + + authConfig := cli.resolveAuthConfig(repoInfo.Index) + requestPrivilege := cli.registryAuthenticationPrivilegedFunc(repoInfo.Index, "pull") + + return cli.imagePullPrivileged(authConfig, distributionRef.String(), "", requestPrivilege) +} + +func (cli *DockerCli) imagePullPrivileged(authConfig types.AuthConfig, imageID, tag string, requestPrivilege client.RequestPrivilegeFunc) error { + + encodedAuth, err := encodeAuthToBase64(authConfig) + if err != nil { + return err + } + options := types.ImagePullOptions{ + ImageID: imageID, + Tag: tag, + RegistryAuth: encodedAuth, + } + + responseBody, err := cli.client.ImagePull(context.Background(), options, requestPrivilege) + if err != nil { + return err + } + defer responseBody.Close() + + return jsonmessage.DisplayJSONMessagesStream(responseBody, cli.out, cli.outFd, cli.isTerminalOut, nil) +} diff --git a/vendor/github.com/docker/docker/api/client/push.go b/vendor/github.com/docker/docker/api/client/push.go new file mode 100644 index 00000000..5f51ec27 --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/push.go @@ -0,0 +1,72 @@ +package client + +import ( + "errors" + "io" + + "golang.org/x/net/context" + + Cli "github.com/docker/docker/cli" + "github.com/docker/docker/pkg/jsonmessage" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "github.com/docker/engine-api/client" + "github.com/docker/engine-api/types" +) + +// CmdPush pushes an image or repository to the registry. 
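+// Digest references are rejected: a digest names content that the registry
+// computes itself, so only NAME with an optional TAG can be pushed.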
+// +// Usage: docker push NAME[:TAG] +func (cli *DockerCli) CmdPush(args ...string) error { + cmd := Cli.Subcmd("push", []string{"NAME[:TAG]"}, Cli.DockerCommands["push"].Description, true) + cmd.Require(flag.Exact, 1) + + cmd.ParseFlags(args, true) + + ref, err := reference.ParseNamed(cmd.Arg(0)) + if err != nil { + return err + } + + var tag string + switch x := ref.(type) { + case reference.Canonical: + return errors.New("cannot push a digest reference") + case reference.NamedTagged: + tag = x.Tag() + } + + // Resolve the Repository name from fqn to RepositoryInfo + repoInfo, err := registry.ParseRepositoryInfo(ref) + if err != nil { + return err + } + // Resolve the Auth config relevant for this server + authConfig := cli.resolveAuthConfig(repoInfo.Index) + + requestPrivilege := cli.registryAuthenticationPrivilegedFunc(repoInfo.Index, "push") + + responseBody, err := cli.imagePushPrivileged(authConfig, ref.Name(), tag, requestPrivilege) + if err != nil { + return err + } + + defer responseBody.Close() + + return jsonmessage.DisplayJSONMessagesStream(responseBody, cli.out, cli.outFd, cli.isTerminalOut, nil) +} + +func (cli *DockerCli) imagePushPrivileged(authConfig types.AuthConfig, imageID, tag string, requestPrivilege client.RequestPrivilegeFunc) (io.ReadCloser, error) { + encodedAuth, err := encodeAuthToBase64(authConfig) + if err != nil { + return nil, err + } + options := types.ImagePushOptions{ + ImageID: imageID, + Tag: tag, + RegistryAuth: encodedAuth, + } + + return cli.client.ImagePush(context.Background(), options, requestPrivilege) +} diff --git a/vendor/github.com/docker/docker/api/client/rename.go b/vendor/github.com/docker/docker/api/client/rename.go new file mode 100644 index 00000000..68369881 --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/rename.go @@ -0,0 +1,34 @@ +package client + +import ( + "fmt" + "strings" + + "golang.org/x/net/context" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" +) + +// CmdRename renames a container. +// +// Usage: docker rename OLD_NAME NEW_NAME +func (cli *DockerCli) CmdRename(args ...string) error { + cmd := Cli.Subcmd("rename", []string{"OLD_NAME NEW_NAME"}, Cli.DockerCommands["rename"].Description, true) + cmd.Require(flag.Exact, 2) + + cmd.ParseFlags(args, true) + + oldName := strings.TrimSpace(cmd.Arg(0)) + newName := strings.TrimSpace(cmd.Arg(1)) + + if oldName == "" || newName == "" { + return fmt.Errorf("Error: Neither old nor new names may be empty") + } + + if err := cli.client.ContainerRename(context.Background(), oldName, newName); err != nil { + fmt.Fprintf(cli.err, "%s\n", err) + return fmt.Errorf("Error: failed to rename container named %s", oldName) + } + return nil +} diff --git a/vendor/github.com/docker/docker/api/client/restart.go b/vendor/github.com/docker/docker/api/client/restart.go new file mode 100644 index 00000000..c0a04bd1 --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/restart.go @@ -0,0 +1,35 @@ +package client + +import ( + "fmt" + "strings" + + "golang.org/x/net/context" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" +) + +// CmdRestart restarts one or more containers. +// +// Usage: docker restart [OPTIONS] CONTAINER [CONTAINER...] 
+func (cli *DockerCli) CmdRestart(args ...string) error { + cmd := Cli.Subcmd("restart", []string{"CONTAINER [CONTAINER...]"}, Cli.DockerCommands["restart"].Description, true) + nSeconds := cmd.Int([]string{"t", "-time"}, 10, "Seconds to wait for stop before killing the container") + cmd.Require(flag.Min, 1) + + cmd.ParseFlags(args, true) + + var errs []string + for _, name := range cmd.Args() { + if err := cli.client.ContainerRestart(context.Background(), name, *nSeconds); err != nil { + errs = append(errs, err.Error()) + } else { + fmt.Fprintf(cli.out, "%s\n", name) + } + } + if len(errs) > 0 { + return fmt.Errorf("%s", strings.Join(errs, "\n")) + } + return nil +} diff --git a/vendor/github.com/docker/docker/api/client/rm.go b/vendor/github.com/docker/docker/api/client/rm.go new file mode 100644 index 00000000..c252b1f7 --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/rm.go @@ -0,0 +1,56 @@ +package client + +import ( + "fmt" + "strings" + + "golang.org/x/net/context" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/engine-api/types" +) + +// CmdRm removes one or more containers. +// +// Usage: docker rm [OPTIONS] CONTAINER [CONTAINER...] +func (cli *DockerCli) CmdRm(args ...string) error { + cmd := Cli.Subcmd("rm", []string{"CONTAINER [CONTAINER...]"}, Cli.DockerCommands["rm"].Description, true) + v := cmd.Bool([]string{"v", "-volumes"}, false, "Remove the volumes associated with the container") + link := cmd.Bool([]string{"l", "-link"}, false, "Remove the specified link") + force := cmd.Bool([]string{"f", "-force"}, false, "Force the removal of a running container (uses SIGKILL)") + cmd.Require(flag.Min, 1) + + cmd.ParseFlags(args, true) + + var errs []string + for _, name := range cmd.Args() { + if name == "" { + return fmt.Errorf("Container name cannot be empty") + } + name = strings.Trim(name, "/") + + if err := cli.removeContainer(name, *v, *link, *force); err != nil { + errs = append(errs, err.Error()) + } else { + fmt.Fprintf(cli.out, "%s\n", name) + } + } + if len(errs) > 0 { + return fmt.Errorf("%s", strings.Join(errs, "\n")) + } + return nil +} + +func (cli *DockerCli) removeContainer(containerID string, removeVolumes, removeLinks, force bool) error { + options := types.ContainerRemoveOptions{ + ContainerID: containerID, + RemoveVolumes: removeVolumes, + RemoveLinks: removeLinks, + Force: force, + } + if err := cli.client.ContainerRemove(context.Background(), options); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/docker/docker/api/client/rmi.go b/vendor/github.com/docker/docker/api/client/rmi.go new file mode 100644 index 00000000..ac1b41db --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/rmi.go @@ -0,0 +1,59 @@ +package client + +import ( + "fmt" + "net/url" + "strings" + + "golang.org/x/net/context" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/engine-api/types" +) + +// CmdRmi removes all images with the specified name(s). +// +// Usage: docker rmi [OPTIONS] IMAGE [IMAGE...] 
+func (cli *DockerCli) CmdRmi(args ...string) error {
+	cmd := Cli.Subcmd("rmi", []string{"IMAGE [IMAGE...]"}, Cli.DockerCommands["rmi"].Description, true)
+	force := cmd.Bool([]string{"f", "-force"}, false, "Force removal of the image")
+	noprune := cmd.Bool([]string{"-no-prune"}, false, "Do not delete untagged parents")
+	cmd.Require(flag.Min, 1)
+
+	cmd.ParseFlags(args, true)
+
+	v := url.Values{}
+	if *force {
+		v.Set("force", "1")
+	}
+	if *noprune {
+		v.Set("noprune", "1")
+	}
+
+	var errs []string
+	for _, name := range cmd.Args() {
+		options := types.ImageRemoveOptions{
+			ImageID:       name,
+			Force:         *force,
+			PruneChildren: !*noprune,
+		}
+
+		dels, err := cli.client.ImageRemove(context.Background(), options)
+		if err != nil {
+			errs = append(errs, err.Error())
+		} else {
+			for _, del := range dels {
+				if del.Deleted != "" {
+					fmt.Fprintf(cli.out, "Deleted: %s\n", del.Deleted)
+				} else {
+					fmt.Fprintf(cli.out, "Untagged: %s\n", del.Untagged)
+				}
+			}
+		}
+	}
+	if len(errs) > 0 {
+		return fmt.Errorf("%s", strings.Join(errs, "\n"))
+	}
+	return nil
+}
diff --git a/vendor/github.com/docker/docker/api/client/run.go b/vendor/github.com/docker/docker/api/client/run.go
new file mode 100644
index 00000000..be0c62cb
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/client/run.go
@@ -0,0 +1,274 @@
+package client
+
+import (
+	"fmt"
+	"io"
+	"os"
+	"runtime"
+	"strings"
+
+	"golang.org/x/net/context"
+
+	"github.com/Sirupsen/logrus"
+	Cli "github.com/docker/docker/cli"
+	"github.com/docker/docker/opts"
+	"github.com/docker/docker/pkg/promise"
+	"github.com/docker/docker/pkg/signal"
+	runconfigopts "github.com/docker/docker/runconfig/opts"
+	"github.com/docker/engine-api/types"
+)
+
+const (
+	errCmdNotFound          = "not found or does not exist."
+	errCmdCouldNotBeInvoked = "could not be invoked."
+)
+
+func (cid *cidFile) Close() error {
+	cid.file.Close()
+
+	if !cid.written {
+		if err := os.Remove(cid.path); err != nil {
+			return fmt.Errorf("failed to remove the CID file '%s': %s", cid.path, err)
+		}
+	}
+
+	return nil
+}
+
+func (cid *cidFile) Write(id string) error {
+	if _, err := cid.file.Write([]byte(id)); err != nil {
+		return fmt.Errorf("Failed to write the container ID to the file: %s", err)
+	}
+	cid.written = true
+	return nil
+}
+
+// if container start fails with 'command not found' error, return 127
+// if container start fails with 'command cannot be invoked' error, return 126
+// return 125 for generic docker daemon failures
+func runStartContainerErr(err error) error {
+	// TrimPrefix, not Trim: Trim would treat the string as a set of characters
+	// and could strip leading characters from the real message as well.
+	trimmedErr := strings.TrimPrefix(err.Error(), "Error response from daemon: ")
+	statusError := Cli.StatusError{StatusCode: 125}
+
+	if strings.HasPrefix(trimmedErr, "Container command") {
+		if strings.Contains(trimmedErr, errCmdNotFound) {
+			statusError = Cli.StatusError{StatusCode: 127}
+		} else if strings.Contains(trimmedErr, errCmdCouldNotBeInvoked) {
+			statusError = Cli.StatusError{StatusCode: 126}
+		}
+	}
+
+	return statusError
+}
+
+// CmdRun runs a command in a new container.
+//
+// Usage: docker run [OPTIONS] IMAGE [COMMAND] [ARG...]
+func (cli *DockerCli) CmdRun(args ...string) error { + cmd := Cli.Subcmd("run", []string{"IMAGE [COMMAND] [ARG...]"}, Cli.DockerCommands["run"].Description, true) + + // These are flags not stored in Config/HostConfig + var ( + flAutoRemove = cmd.Bool([]string{"-rm"}, false, "Automatically remove the container when it exits") + flDetach = cmd.Bool([]string{"d", "-detach"}, false, "Run container in background and print container ID") + flSigProxy = cmd.Bool([]string{"-sig-proxy"}, true, "Proxy received signals to the process") + flName = cmd.String([]string{"-name"}, "", "Assign a name to the container") + flDetachKeys = cmd.String([]string{"-detach-keys"}, "", "Override the key sequence for detaching a container") + flAttach *opts.ListOpts + + ErrConflictAttachDetach = fmt.Errorf("Conflicting options: -a and -d") + ErrConflictRestartPolicyAndAutoRemove = fmt.Errorf("Conflicting options: --restart and --rm") + ErrConflictDetachAutoRemove = fmt.Errorf("Conflicting options: --rm and -d") + ) + + config, hostConfig, networkingConfig, cmd, err := runconfigopts.Parse(cmd, args) + + // just in case the Parse does not exit + if err != nil { + cmd.ReportError(err.Error(), true) + os.Exit(125) + } + + if hostConfig.OomKillDisable != nil && *hostConfig.OomKillDisable && hostConfig.Memory == 0 { + fmt.Fprintf(cli.err, "WARNING: Disabling the OOM killer on containers without setting a '-m/--memory' limit may be dangerous.\n") + } + + if config.Image == "" { + cmd.Usage() + return nil + } + + config.ArgsEscaped = false + + if !*flDetach { + if err := cli.CheckTtyInput(config.AttachStdin, config.Tty); err != nil { + return err + } + } else { + if fl := cmd.Lookup("-attach"); fl != nil { + flAttach = fl.Value.(*opts.ListOpts) + if flAttach.Len() != 0 { + return ErrConflictAttachDetach + } + } + if *flAutoRemove { + return ErrConflictDetachAutoRemove + } + + config.AttachStdin = false + config.AttachStdout = false + config.AttachStderr = false + config.StdinOnce = false + } + + // Disable flSigProxy when in TTY mode + sigProxy := *flSigProxy + if config.Tty { + sigProxy = false + } + + // Telling the Windows daemon the initial size of the tty during start makes + // a far better user experience rather than relying on subsequent resizes + // to cause things to catch up. 
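+	// Resizes after start are still propagated on every platform through
+	// monitorTtySize further below.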
+ if runtime.GOOS == "windows" { + hostConfig.ConsoleSize[0], hostConfig.ConsoleSize[1] = cli.getTtySize() + } + + createResponse, err := cli.createContainer(config, hostConfig, networkingConfig, hostConfig.ContainerIDFile, *flName) + if err != nil { + cmd.ReportError(err.Error(), true) + return runStartContainerErr(err) + } + if sigProxy { + sigc := cli.forwardAllSignals(createResponse.ID) + defer signal.StopCatch(sigc) + } + var ( + waitDisplayID chan struct{} + errCh chan error + ) + if !config.AttachStdout && !config.AttachStderr { + // Make this asynchronous to allow the client to write to stdin before having to read the ID + waitDisplayID = make(chan struct{}) + go func() { + defer close(waitDisplayID) + fmt.Fprintf(cli.out, "%s\n", createResponse.ID) + }() + } + if *flAutoRemove && (hostConfig.RestartPolicy.IsAlways() || hostConfig.RestartPolicy.IsOnFailure()) { + return ErrConflictRestartPolicyAndAutoRemove + } + + if config.AttachStdin || config.AttachStdout || config.AttachStderr { + var ( + out, stderr io.Writer + in io.ReadCloser + ) + if config.AttachStdin { + in = cli.in + } + if config.AttachStdout { + out = cli.out + } + if config.AttachStderr { + if config.Tty { + stderr = cli.out + } else { + stderr = cli.err + } + } + + if *flDetachKeys != "" { + cli.configFile.DetachKeys = *flDetachKeys + } + + options := types.ContainerAttachOptions{ + ContainerID: createResponse.ID, + Stream: true, + Stdin: config.AttachStdin, + Stdout: config.AttachStdout, + Stderr: config.AttachStderr, + DetachKeys: cli.configFile.DetachKeys, + } + + resp, err := cli.client.ContainerAttach(context.Background(), options) + if err != nil { + return err + } + if in != nil && config.Tty { + if err := cli.setRawTerminal(); err != nil { + return err + } + defer cli.restoreTerminal(in) + } + errCh = promise.Go(func() error { + return cli.holdHijackedConnection(config.Tty, in, out, stderr, resp) + }) + } + + if *flAutoRemove { + defer func() { + if err := cli.removeContainer(createResponse.ID, true, false, false); err != nil { + fmt.Fprintf(cli.err, "%v\n", err) + } + }() + } + + //start the container + if err := cli.client.ContainerStart(context.Background(), createResponse.ID); err != nil { + cmd.ReportError(err.Error(), false) + return runStartContainerErr(err) + } + + if (config.AttachStdin || config.AttachStdout || config.AttachStderr) && config.Tty && cli.isTerminalOut { + if err := cli.monitorTtySize(createResponse.ID, false); err != nil { + fmt.Fprintf(cli.err, "Error monitoring TTY size: %s\n", err) + } + } + + if errCh != nil { + if err := <-errCh; err != nil { + logrus.Debugf("Error hijack: %s", err) + return err + } + } + + // Detached mode: wait for the id to be displayed and return. 
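+	// waitDisplayID is non-nil exactly when neither stdout nor stderr is
+	// attached; it was set up above, before the container was started.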
+ if !config.AttachStdout && !config.AttachStderr { + // Detached mode + <-waitDisplayID + return nil + } + + var status int + + // Attached mode + if *flAutoRemove { + // Autoremove: wait for the container to finish, retrieve + // the exit code and remove the container + if status, err = cli.client.ContainerWait(context.Background(), createResponse.ID); err != nil { + return runStartContainerErr(err) + } + if _, status, err = getExitCode(cli, createResponse.ID); err != nil { + return err + } + } else { + // No Autoremove: Simply retrieve the exit code + if !config.Tty { + // In non-TTY mode, we can't detach, so we must wait for container exit + if status, err = cli.client.ContainerWait(context.Background(), createResponse.ID); err != nil { + return err + } + } else { + // In TTY mode, there is a race: if the process dies too slowly, the state could + // be updated after the getExitCode call and result in the wrong exit code being reported + if _, status, err = getExitCode(cli, createResponse.ID); err != nil { + return err + } + } + } + if status != 0 { + return Cli.StatusError{StatusCode: status} + } + return nil +} diff --git a/vendor/github.com/docker/docker/api/client/save.go b/vendor/github.com/docker/docker/api/client/save.go new file mode 100644 index 00000000..4aabf1bd --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/save.go @@ -0,0 +1,42 @@ +package client + +import ( + "errors" + "io" + + "golang.org/x/net/context" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" +) + +// CmdSave saves one or more images to a tar archive. +// +// The tar archive is written to STDOUT by default, or written to a file. +// +// Usage: docker save [OPTIONS] IMAGE [IMAGE...] +func (cli *DockerCli) CmdSave(args ...string) error { + cmd := Cli.Subcmd("save", []string{"IMAGE [IMAGE...]"}, Cli.DockerCommands["save"].Description+" (streamed to STDOUT by default)", true) + outfile := cmd.String([]string{"o", "-output"}, "", "Write to a file, instead of STDOUT") + cmd.Require(flag.Min, 1) + + cmd.ParseFlags(args, true) + + if *outfile == "" && cli.isTerminalOut { + return errors.New("Cowardly refusing to save to a terminal. Use the -o flag or redirect.") + } + + responseBody, err := cli.client.ImageSave(context.Background(), cmd.Args()) + if err != nil { + return err + } + defer responseBody.Close() + + if *outfile == "" { + _, err := io.Copy(cli.out, responseBody) + return err + } + + return copyToFile(*outfile, responseBody) + +} diff --git a/vendor/github.com/docker/docker/api/client/search.go b/vendor/github.com/docker/docker/api/client/search.go new file mode 100644 index 00000000..82deb409 --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/search.go @@ -0,0 +1,93 @@ +package client + +import ( + "fmt" + "net/url" + "sort" + "strings" + "text/tabwriter" + + "golang.org/x/net/context" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/stringutils" + "github.com/docker/docker/registry" + "github.com/docker/engine-api/types" + registrytypes "github.com/docker/engine-api/types/registry" +) + +// CmdSearch searches the Docker Hub for images. 
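+// Results come back unordered, are sorted by star count, and can be narrowed
+// with --automated and --stars as they are printed.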
+// +// Usage: docker search [OPTIONS] TERM +func (cli *DockerCli) CmdSearch(args ...string) error { + cmd := Cli.Subcmd("search", []string{"TERM"}, Cli.DockerCommands["search"].Description, true) + noTrunc := cmd.Bool([]string{"-no-trunc"}, false, "Don't truncate output") + automated := cmd.Bool([]string{"-automated"}, false, "Only show automated builds") + stars := cmd.Uint([]string{"s", "-stars"}, 0, "Only displays with at least x stars") + cmd.Require(flag.Exact, 1) + + cmd.ParseFlags(args, true) + + name := cmd.Arg(0) + v := url.Values{} + v.Set("term", name) + + indexInfo, err := registry.ParseSearchIndexInfo(name) + if err != nil { + return err + } + + authConfig := cli.resolveAuthConfig(indexInfo) + requestPrivilege := cli.registryAuthenticationPrivilegedFunc(indexInfo, "search") + + encodedAuth, err := encodeAuthToBase64(authConfig) + if err != nil { + return err + } + + options := types.ImageSearchOptions{ + Term: name, + RegistryAuth: encodedAuth, + } + + unorderedResults, err := cli.client.ImageSearch(context.Background(), options, requestPrivilege) + if err != nil { + return err + } + + results := searchResultsByStars(unorderedResults) + sort.Sort(results) + + w := tabwriter.NewWriter(cli.out, 10, 1, 3, ' ', 0) + fmt.Fprintf(w, "NAME\tDESCRIPTION\tSTARS\tOFFICIAL\tAUTOMATED\n") + for _, res := range results { + if (*automated && !res.IsAutomated) || (int(*stars) > res.StarCount) { + continue + } + desc := strings.Replace(res.Description, "\n", " ", -1) + desc = strings.Replace(desc, "\r", " ", -1) + if !*noTrunc && len(desc) > 45 { + desc = stringutils.Truncate(desc, 42) + "..." + } + fmt.Fprintf(w, "%s\t%s\t%d\t", res.Name, desc, res.StarCount) + if res.IsOfficial { + fmt.Fprint(w, "[OK]") + + } + fmt.Fprint(w, "\t") + if res.IsAutomated || res.IsTrusted { + fmt.Fprint(w, "[OK]") + } + fmt.Fprint(w, "\n") + } + w.Flush() + return nil +} + +// SearchResultsByStars sorts search results in descending order by number of stars. +type searchResultsByStars []registrytypes.SearchResult + +func (r searchResultsByStars) Len() int { return len(r) } +func (r searchResultsByStars) Swap(i, j int) { r[i], r[j] = r[j], r[i] } +func (r searchResultsByStars) Less(i, j int) bool { return r[j].StarCount < r[i].StarCount } diff --git a/vendor/github.com/docker/docker/api/client/start.go b/vendor/github.com/docker/docker/api/client/start.go new file mode 100644 index 00000000..1ff2845f --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/start.go @@ -0,0 +1,157 @@ +package client + +import ( + "fmt" + "io" + "os" + "strings" + + "golang.org/x/net/context" + + "github.com/Sirupsen/logrus" + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/promise" + "github.com/docker/docker/pkg/signal" + "github.com/docker/engine-api/types" +) + +func (cli *DockerCli) forwardAllSignals(cid string) chan os.Signal { + sigc := make(chan os.Signal, 128) + signal.CatchAll(sigc) + go func() { + for s := range sigc { + if s == signal.SIGCHLD || s == signal.SIGPIPE { + continue + } + var sig string + for sigStr, sigN := range signal.SignalMap { + if sigN == s { + sig = sigStr + break + } + } + if sig == "" { + fmt.Fprintf(cli.err, "Unsupported signal: %v. Discarding.\n", s) + continue + } + + if err := cli.client.ContainerKill(context.Background(), cid, sig); err != nil { + logrus.Debugf("Error sending signal: %s", err) + } + } + }() + return sigc +} + +// CmdStart starts one or more containers. 
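+// With -a/--attach or -i/--interactive exactly one container may be given,
+// and it is attached to before being started; otherwise all named containers
+// are started in the background.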
+// +// Usage: docker start [OPTIONS] CONTAINER [CONTAINER...] +func (cli *DockerCli) CmdStart(args ...string) error { + cmd := Cli.Subcmd("start", []string{"CONTAINER [CONTAINER...]"}, Cli.DockerCommands["start"].Description, true) + attach := cmd.Bool([]string{"a", "-attach"}, false, "Attach STDOUT/STDERR and forward signals") + openStdin := cmd.Bool([]string{"i", "-interactive"}, false, "Attach container's STDIN") + detachKeys := cmd.String([]string{"-detach-keys"}, "", "Override the key sequence for detaching a container") + cmd.Require(flag.Min, 1) + + cmd.ParseFlags(args, true) + + if *attach || *openStdin { + // We're going to attach to a container. + // 1. Ensure we only have one container. + if cmd.NArg() > 1 { + return fmt.Errorf("You cannot start and attach multiple containers at once.") + } + + // 2. Attach to the container. + containerID := cmd.Arg(0) + c, err := cli.client.ContainerInspect(context.Background(), containerID) + if err != nil { + return err + } + + if !c.Config.Tty { + sigc := cli.forwardAllSignals(containerID) + defer signal.StopCatch(sigc) + } + + if *detachKeys != "" { + cli.configFile.DetachKeys = *detachKeys + } + + options := types.ContainerAttachOptions{ + ContainerID: containerID, + Stream: true, + Stdin: *openStdin && c.Config.OpenStdin, + Stdout: true, + Stderr: true, + DetachKeys: cli.configFile.DetachKeys, + } + + var in io.ReadCloser + if options.Stdin { + in = cli.in + } + + resp, err := cli.client.ContainerAttach(context.Background(), options) + if err != nil { + return err + } + defer resp.Close() + if in != nil && c.Config.Tty { + if err := cli.setRawTerminal(); err != nil { + return err + } + defer cli.restoreTerminal(in) + } + + cErr := promise.Go(func() error { + return cli.holdHijackedConnection(c.Config.Tty, in, cli.out, cli.err, resp) + }) + + // 3. Start the container. + if err := cli.client.ContainerStart(context.Background(), containerID); err != nil { + return err + } + + // 4. Wait for attachment to break. + if c.Config.Tty && cli.isTerminalOut { + if err := cli.monitorTtySize(containerID, false); err != nil { + fmt.Fprintf(cli.err, "Error monitoring TTY size: %s\n", err) + } + } + if attchErr := <-cErr; attchErr != nil { + return attchErr + } + _, status, err := getExitCode(cli, containerID) + if err != nil { + return err + } + if status != 0 { + return Cli.StatusError{StatusCode: status} + } + } else { + // We're not going to attach to anything. + // Start as many containers as we want. 
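+		// Failures are printed per container and folded into a single
+		// error once every start has been attempted.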
+ return cli.startContainersWithoutAttachments(cmd.Args()) + } + + return nil +} + +func (cli *DockerCli) startContainersWithoutAttachments(containerIDs []string) error { + var failedContainers []string + for _, containerID := range containerIDs { + if err := cli.client.ContainerStart(context.Background(), containerID); err != nil { + fmt.Fprintf(cli.err, "%s\n", err) + failedContainers = append(failedContainers, containerID) + } else { + fmt.Fprintf(cli.out, "%s\n", containerID) + } + } + + if len(failedContainers) > 0 { + return fmt.Errorf("Error: failed to start containers: %v", strings.Join(failedContainers, ", ")) + } + return nil +} diff --git a/vendor/github.com/docker/docker/api/client/stats.go b/vendor/github.com/docker/docker/api/client/stats.go new file mode 100644 index 00000000..b84ac3e0 --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/stats.go @@ -0,0 +1,208 @@ +package client + +import ( + "fmt" + "io" + "strings" + "sync" + "text/tabwriter" + "time" + + "golang.org/x/net/context" + + Cli "github.com/docker/docker/cli" + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/events" + "github.com/docker/engine-api/types/filters" +) + +// CmdStats displays a live stream of resource usage statistics for one or more containers. +// +// This shows real-time information on CPU usage, memory usage, and network I/O. +// +// Usage: docker stats [OPTIONS] [CONTAINER...] +func (cli *DockerCli) CmdStats(args ...string) error { + cmd := Cli.Subcmd("stats", []string{"[CONTAINER...]"}, Cli.DockerCommands["stats"].Description, true) + all := cmd.Bool([]string{"a", "-all"}, false, "Show all containers (default shows just running)") + noStream := cmd.Bool([]string{"-no-stream"}, false, "Disable streaming stats and only pull the first result") + + cmd.ParseFlags(args, true) + + names := cmd.Args() + showAll := len(names) == 0 + closeChan := make(chan error) + + // monitorContainerEvents watches for container creation and removal (only + // used when calling `docker stats` without arguments). + monitorContainerEvents := func(started chan<- struct{}, c chan events.Message) { + f := filters.NewArgs() + f.Add("type", "container") + options := types.EventsOptions{ + Filters: f, + } + resBody, err := cli.client.Events(context.Background(), options) + // Whether we successfully subscribed to events or not, we can now + // unblock the main goroutine. + close(started) + if err != nil { + closeChan <- err + return + } + defer resBody.Close() + + decodeEvents(resBody, func(event events.Message, err error) error { + if err != nil { + closeChan <- err + return nil + } + c <- event + return nil + }) + } + + // waitFirst is a WaitGroup to wait first stat data's reach for each container + waitFirst := &sync.WaitGroup{} + + cStats := stats{} + // getContainerList simulates creation event for all previously existing + // containers (only used when calling `docker stats` without arguments). + getContainerList := func() { + options := types.ContainerListOptions{ + All: *all, + } + cs, err := cli.client.ContainerList(context.Background(), options) + if err != nil { + closeChan <- err + } + for _, container := range cs { + s := &containerStats{Name: container.ID[:12]} + if cStats.add(s) { + waitFirst.Add(1) + go s.Collect(cli.client, !*noStream, waitFirst) + } + } + } + + if showAll { + // If no names were specified, start a long running goroutine which + // monitors container events. 
We make sure we're subscribed before + // retrieving the list of running containers to avoid a race where we + // would "miss" a creation. + started := make(chan struct{}) + eh := eventHandler{handlers: make(map[string]func(events.Message))} + eh.Handle("create", func(e events.Message) { + if *all { + s := &containerStats{Name: e.ID[:12]} + if cStats.add(s) { + waitFirst.Add(1) + go s.Collect(cli.client, !*noStream, waitFirst) + } + } + }) + + eh.Handle("start", func(e events.Message) { + s := &containerStats{Name: e.ID[:12]} + if cStats.add(s) { + waitFirst.Add(1) + go s.Collect(cli.client, !*noStream, waitFirst) + } + }) + + eh.Handle("die", func(e events.Message) { + if !*all { + cStats.remove(e.ID[:12]) + } + }) + + eventChan := make(chan events.Message) + go eh.Watch(eventChan) + go monitorContainerEvents(started, eventChan) + defer close(eventChan) + <-started + + // Start a short-lived goroutine to retrieve the initial list of + // containers. + getContainerList() + } else { + // Artificially send creation events for the containers we were asked to + // monitor (same code path than we use when monitoring all containers). + for _, name := range names { + s := &containerStats{Name: name} + if cStats.add(s) { + waitFirst.Add(1) + go s.Collect(cli.client, !*noStream, waitFirst) + } + } + + // We don't expect any asynchronous errors: closeChan can be closed. + close(closeChan) + + // Do a quick pause to detect any error with the provided list of + // container names. + time.Sleep(1500 * time.Millisecond) + var errs []string + cStats.mu.Lock() + for _, c := range cStats.cs { + c.mu.Lock() + if c.err != nil { + errs = append(errs, fmt.Sprintf("%s: %v", c.Name, c.err)) + } + c.mu.Unlock() + } + cStats.mu.Unlock() + if len(errs) > 0 { + return fmt.Errorf("%s", strings.Join(errs, ", ")) + } + } + + // before print to screen, make sure each container get at least one valid stat data + waitFirst.Wait() + + w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0) + printHeader := func() { + if !*noStream { + fmt.Fprint(cli.out, "\033[2J") + fmt.Fprint(cli.out, "\033[H") + } + io.WriteString(w, "CONTAINER\tCPU %\tMEM USAGE / LIMIT\tMEM %\tNET I/O\tBLOCK I/O\tPIDS\n") + } + + for range time.Tick(500 * time.Millisecond) { + printHeader() + toRemove := []int{} + cStats.mu.Lock() + for i, s := range cStats.cs { + if err := s.Display(w); err != nil && !*noStream { + toRemove = append(toRemove, i) + } + } + for j := len(toRemove) - 1; j >= 0; j-- { + i := toRemove[j] + cStats.cs = append(cStats.cs[:i], cStats.cs[i+1:]...) 
+ } + if len(cStats.cs) == 0 && !showAll { + return nil + } + cStats.mu.Unlock() + w.Flush() + if *noStream { + break + } + select { + case err, ok := <-closeChan: + if ok { + if err != nil { + // this is suppressing "unexpected EOF" in the cli when the + // daemon restarts so it shutdowns cleanly + if err == io.ErrUnexpectedEOF { + return nil + } + return err + } + } + default: + // just skip + } + } + return nil +} diff --git a/vendor/github.com/docker/docker/api/client/stats_helpers.go b/vendor/github.com/docker/docker/api/client/stats_helpers.go new file mode 100644 index 00000000..404c3ff1 --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/stats_helpers.go @@ -0,0 +1,219 @@ +package client + +import ( + "encoding/json" + "fmt" + "io" + "strings" + "sync" + "time" + + "github.com/docker/engine-api/client" + "github.com/docker/engine-api/types" + "github.com/docker/go-units" + "golang.org/x/net/context" +) + +type containerStats struct { + Name string + CPUPercentage float64 + Memory float64 + MemoryLimit float64 + MemoryPercentage float64 + NetworkRx float64 + NetworkTx float64 + BlockRead float64 + BlockWrite float64 + PidsCurrent uint64 + mu sync.RWMutex + err error +} + +type stats struct { + mu sync.Mutex + cs []*containerStats +} + +func (s *stats) add(cs *containerStats) bool { + s.mu.Lock() + defer s.mu.Unlock() + if _, exists := s.isKnownContainer(cs.Name); !exists { + s.cs = append(s.cs, cs) + return true + } + return false +} + +func (s *stats) remove(id string) { + s.mu.Lock() + if i, exists := s.isKnownContainer(id); exists { + s.cs = append(s.cs[:i], s.cs[i+1:]...) + } + s.mu.Unlock() +} + +func (s *stats) isKnownContainer(cid string) (int, bool) { + for i, c := range s.cs { + if c.Name == cid { + return i, true + } + } + return -1, false +} + +func (s *containerStats) Collect(cli client.APIClient, streamStats bool, waitFirst *sync.WaitGroup) { + var ( + getFirst bool + previousCPU uint64 + previousSystem uint64 + u = make(chan error, 1) + ) + + defer func() { + // if error happens and we get nothing of stats, release wait group whatever + if !getFirst { + getFirst = true + waitFirst.Done() + } + }() + + responseBody, err := cli.ContainerStats(context.Background(), s.Name, streamStats) + if err != nil { + s.mu.Lock() + s.err = err + s.mu.Unlock() + return + } + defer responseBody.Close() + + dec := json.NewDecoder(responseBody) + go func() { + for { + var v *types.StatsJSON + if err := dec.Decode(&v); err != nil { + u <- err + return + } + + var memPercent = 0.0 + var cpuPercent = 0.0 + + // MemoryStats.Limit will never be 0 unless the container is not running and we haven't + // got any data from cgroup + if v.MemoryStats.Limit != 0 { + memPercent = float64(v.MemoryStats.Usage) / float64(v.MemoryStats.Limit) * 100.0 + } + + previousCPU = v.PreCPUStats.CPUUsage.TotalUsage + previousSystem = v.PreCPUStats.SystemUsage + cpuPercent = calculateCPUPercent(previousCPU, previousSystem, v) + blkRead, blkWrite := calculateBlockIO(v.BlkioStats) + s.mu.Lock() + s.CPUPercentage = cpuPercent + s.Memory = float64(v.MemoryStats.Usage) + s.MemoryLimit = float64(v.MemoryStats.Limit) + s.MemoryPercentage = memPercent + s.NetworkRx, s.NetworkTx = calculateNetwork(v.Networks) + s.BlockRead = float64(blkRead) + s.BlockWrite = float64(blkWrite) + s.PidsCurrent = v.PidsStats.Current + s.mu.Unlock() + u <- nil + if !streamStats { + return + } + } + }() + for { + select { + case <-time.After(2 * time.Second): + // zero out the values if we have not received an update within + // the 
specified duration. + s.mu.Lock() + s.CPUPercentage = 0 + s.Memory = 0 + s.MemoryPercentage = 0 + s.MemoryLimit = 0 + s.NetworkRx = 0 + s.NetworkTx = 0 + s.BlockRead = 0 + s.BlockWrite = 0 + s.PidsCurrent = 0 + s.mu.Unlock() + // if this is the first stat you get, release WaitGroup + if !getFirst { + getFirst = true + waitFirst.Done() + } + case err := <-u: + if err != nil { + s.mu.Lock() + s.err = err + s.mu.Unlock() + return + } + // if this is the first stat you get, release WaitGroup + if !getFirst { + getFirst = true + waitFirst.Done() + } + } + if !streamStats { + return + } + } +} + +func (s *containerStats) Display(w io.Writer) error { + s.mu.RLock() + defer s.mu.RUnlock() + if s.err != nil { + return s.err + } + fmt.Fprintf(w, "%s\t%.2f%%\t%s / %s\t%.2f%%\t%s / %s\t%s / %s\t%d\n", + s.Name, + s.CPUPercentage, + units.HumanSize(s.Memory), units.HumanSize(s.MemoryLimit), + s.MemoryPercentage, + units.HumanSize(s.NetworkRx), units.HumanSize(s.NetworkTx), + units.HumanSize(s.BlockRead), units.HumanSize(s.BlockWrite), + s.PidsCurrent) + return nil +} + +func calculateCPUPercent(previousCPU, previousSystem uint64, v *types.StatsJSON) float64 { + var ( + cpuPercent = 0.0 + // calculate the change for the cpu usage of the container in between readings + cpuDelta = float64(v.CPUStats.CPUUsage.TotalUsage) - float64(previousCPU) + // calculate the change for the entire system between readings + systemDelta = float64(v.CPUStats.SystemUsage) - float64(previousSystem) + ) + + if systemDelta > 0.0 && cpuDelta > 0.0 { + cpuPercent = (cpuDelta / systemDelta) * float64(len(v.CPUStats.CPUUsage.PercpuUsage)) * 100.0 + } + return cpuPercent +} + +func calculateBlockIO(blkio types.BlkioStats) (blkRead uint64, blkWrite uint64) { + for _, bioEntry := range blkio.IoServiceBytesRecursive { + switch strings.ToLower(bioEntry.Op) { + case "read": + blkRead = blkRead + bioEntry.Value + case "write": + blkWrite = blkWrite + bioEntry.Value + } + } + return +} + +func calculateNetwork(network map[string]types.NetworkStats) (float64, float64) { + var rx, tx float64 + + for _, v := range network { + rx += float64(v.RxBytes) + tx += float64(v.TxBytes) + } + return rx, tx +} diff --git a/vendor/github.com/docker/docker/api/client/stop.go b/vendor/github.com/docker/docker/api/client/stop.go new file mode 100644 index 00000000..23d53447 --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/stop.go @@ -0,0 +1,37 @@ +package client + +import ( + "fmt" + "strings" + + "golang.org/x/net/context" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" +) + +// CmdStop stops one or more containers. +// +// A running container is stopped by first sending SIGTERM and then SIGKILL if the container fails to stop within a grace period (the default is 10 seconds). +// +// Usage: docker stop [OPTIONS] CONTAINER [CONTAINER...] 
+func (cli *DockerCli) CmdStop(args ...string) error { + cmd := Cli.Subcmd("stop", []string{"CONTAINER [CONTAINER...]"}, Cli.DockerCommands["stop"].Description+".\nSending SIGTERM and then SIGKILL after a grace period", true) + nSeconds := cmd.Int([]string{"t", "-time"}, 10, "Seconds to wait for stop before killing it") + cmd.Require(flag.Min, 1) + + cmd.ParseFlags(args, true) + + var errs []string + for _, name := range cmd.Args() { + if err := cli.client.ContainerStop(context.Background(), name, *nSeconds); err != nil { + errs = append(errs, err.Error()) + } else { + fmt.Fprintf(cli.out, "%s\n", name) + } + } + if len(errs) > 0 { + return fmt.Errorf("%s", strings.Join(errs, "\n")) + } + return nil +} diff --git a/vendor/github.com/docker/docker/api/client/tag.go b/vendor/github.com/docker/docker/api/client/tag.go new file mode 100644 index 00000000..1d87e437 --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/tag.go @@ -0,0 +1,46 @@ +package client + +import ( + "errors" + + "golang.org/x/net/context" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/reference" + "github.com/docker/engine-api/types" +) + +// CmdTag tags an image into a repository. +// +// Usage: docker tag [OPTIONS] IMAGE[:TAG] [REGISTRYHOST/][USERNAME/]NAME[:TAG] +func (cli *DockerCli) CmdTag(args ...string) error { + cmd := Cli.Subcmd("tag", []string{"IMAGE[:TAG] [REGISTRYHOST/][USERNAME/]NAME[:TAG]"}, Cli.DockerCommands["tag"].Description, true) + force := cmd.Bool([]string{"#f", "#-force"}, false, "Force the tagging even if there's a conflict") + cmd.Require(flag.Exact, 2) + + cmd.ParseFlags(args, true) + + ref, err := reference.ParseNamed(cmd.Arg(1)) + if err != nil { + return err + } + + if _, isCanonical := ref.(reference.Canonical); isCanonical { + return errors.New("refusing to create a tag with a digest reference") + } + + var tag string + if tagged, isTagged := ref.(reference.NamedTagged); isTagged { + tag = tagged.Tag() + } + + options := types.ImageTagOptions{ + ImageID: cmd.Arg(0), + RepositoryName: ref.Name(), + Tag: tag, + Force: *force, + } + + return cli.client.ImageTag(context.Background(), options) +} diff --git a/vendor/github.com/docker/docker/api/client/top.go b/vendor/github.com/docker/docker/api/client/top.go new file mode 100644 index 00000000..bb2ec46c --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/top.go @@ -0,0 +1,41 @@ +package client + +import ( + "fmt" + "strings" + "text/tabwriter" + + "golang.org/x/net/context" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" +) + +// CmdTop displays the running processes of a container. 
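+// Any arguments after the container name are forwarded as ps options to the
+// daemon.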
+// +// Usage: docker top CONTAINER +func (cli *DockerCli) CmdTop(args ...string) error { + cmd := Cli.Subcmd("top", []string{"CONTAINER [ps OPTIONS]"}, Cli.DockerCommands["top"].Description, true) + cmd.Require(flag.Min, 1) + + cmd.ParseFlags(args, true) + + var arguments []string + if cmd.NArg() > 1 { + arguments = cmd.Args()[1:] + } + + procList, err := cli.client.ContainerTop(context.Background(), cmd.Arg(0), arguments) + if err != nil { + return err + } + + w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0) + fmt.Fprintln(w, strings.Join(procList.Titles, "\t")) + + for _, proc := range procList.Processes { + fmt.Fprintln(w, strings.Join(proc, "\t")) + } + w.Flush() + return nil +} diff --git a/vendor/github.com/docker/docker/api/client/unpause.go b/vendor/github.com/docker/docker/api/client/unpause.go new file mode 100644 index 00000000..b8630b1f --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/unpause.go @@ -0,0 +1,34 @@ +package client + +import ( + "fmt" + "strings" + + "golang.org/x/net/context" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" +) + +// CmdUnpause unpauses all processes within a container, for one or more containers. +// +// Usage: docker unpause CONTAINER [CONTAINER...] +func (cli *DockerCli) CmdUnpause(args ...string) error { + cmd := Cli.Subcmd("unpause", []string{"CONTAINER [CONTAINER...]"}, Cli.DockerCommands["unpause"].Description, true) + cmd.Require(flag.Min, 1) + + cmd.ParseFlags(args, true) + + var errs []string + for _, name := range cmd.Args() { + if err := cli.client.ContainerUnpause(context.Background(), name); err != nil { + errs = append(errs, err.Error()) + } else { + fmt.Fprintf(cli.out, "%s\n", name) + } + } + if len(errs) > 0 { + return fmt.Errorf("%s", strings.Join(errs, "\n")) + } + return nil +} diff --git a/vendor/github.com/docker/docker/api/client/update.go b/vendor/github.com/docker/docker/api/client/update.go new file mode 100644 index 00000000..a2f9e534 --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/update.go @@ -0,0 +1,117 @@ +package client + +import ( + "fmt" + "strings" + + "golang.org/x/net/context" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/runconfig/opts" + "github.com/docker/engine-api/types/container" + "github.com/docker/go-units" +) + +// CmdUpdate updates resources of one or more containers. +// +// Usage: docker update [OPTIONS] CONTAINER [CONTAINER...] 
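+//
+// The memory-related flags accept human-readable sizes that go-units
+// converts to bytes; a small sketch of that conversion (values are
+// illustrative):
+//
+//	mem, err := units.RAMInBytes("512m") // mem == 536870912 on success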
+func (cli *DockerCli) CmdUpdate(args ...string) error { + cmd := Cli.Subcmd("update", []string{"CONTAINER [CONTAINER...]"}, Cli.DockerCommands["update"].Description, true) + flBlkioWeight := cmd.Uint16([]string{"-blkio-weight"}, 0, "Block IO (relative weight), between 10 and 1000") + flCPUPeriod := cmd.Int64([]string{"-cpu-period"}, 0, "Limit CPU CFS (Completely Fair Scheduler) period") + flCPUQuota := cmd.Int64([]string{"-cpu-quota"}, 0, "Limit CPU CFS (Completely Fair Scheduler) quota") + flCpusetCpus := cmd.String([]string{"-cpuset-cpus"}, "", "CPUs in which to allow execution (0-3, 0,1)") + flCpusetMems := cmd.String([]string{"-cpuset-mems"}, "", "MEMs in which to allow execution (0-3, 0,1)") + flCPUShares := cmd.Int64([]string{"#c", "-cpu-shares"}, 0, "CPU shares (relative weight)") + flMemoryString := cmd.String([]string{"m", "-memory"}, "", "Memory limit") + flMemoryReservation := cmd.String([]string{"-memory-reservation"}, "", "Memory soft limit") + flMemorySwap := cmd.String([]string{"-memory-swap"}, "", "Swap limit equal to memory plus swap: '-1' to enable unlimited swap") + flKernelMemory := cmd.String([]string{"-kernel-memory"}, "", "Kernel memory limit") + flRestartPolicy := cmd.String([]string{"-restart"}, "", "Restart policy to apply when a container exits") + + cmd.Require(flag.Min, 1) + cmd.ParseFlags(args, true) + if cmd.NFlag() == 0 { + return fmt.Errorf("You must provide one or more flags when using this command.") + } + + var err error + var flMemory int64 + if *flMemoryString != "" { + flMemory, err = units.RAMInBytes(*flMemoryString) + if err != nil { + return err + } + } + + var memoryReservation int64 + if *flMemoryReservation != "" { + memoryReservation, err = units.RAMInBytes(*flMemoryReservation) + if err != nil { + return err + } + } + + var memorySwap int64 + if *flMemorySwap != "" { + if *flMemorySwap == "-1" { + memorySwap = -1 + } else { + memorySwap, err = units.RAMInBytes(*flMemorySwap) + if err != nil { + return err + } + } + } + + var kernelMemory int64 + if *flKernelMemory != "" { + kernelMemory, err = units.RAMInBytes(*flKernelMemory) + if err != nil { + return err + } + } + + var restartPolicy container.RestartPolicy + if *flRestartPolicy != "" { + restartPolicy, err = opts.ParseRestartPolicy(*flRestartPolicy) + if err != nil { + return err + } + } + + resources := container.Resources{ + BlkioWeight: *flBlkioWeight, + CpusetCpus: *flCpusetCpus, + CpusetMems: *flCpusetMems, + CPUShares: *flCPUShares, + Memory: flMemory, + MemoryReservation: memoryReservation, + MemorySwap: memorySwap, + KernelMemory: kernelMemory, + CPUPeriod: *flCPUPeriod, + CPUQuota: *flCPUQuota, + } + + updateConfig := container.UpdateConfig{ + Resources: resources, + RestartPolicy: restartPolicy, + } + + names := cmd.Args() + var errs []string + for _, name := range names { + if err := cli.client.ContainerUpdate(context.Background(), name, updateConfig); err != nil { + errs = append(errs, err.Error()) + } else { + fmt.Fprintf(cli.out, "%s\n", name) + } + } + + if len(errs) > 0 { + return fmt.Errorf("%s", strings.Join(errs, "\n")) + } + + return nil +} diff --git a/vendor/github.com/docker/docker/api/client/utils.go b/vendor/github.com/docker/docker/api/client/utils.go new file mode 100644 index 00000000..4deee224 --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/utils.go @@ -0,0 +1,202 @@ +package client + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + gosignal "os/signal" + "path/filepath" + "runtime" + "time" + + 
"golang.org/x/net/context" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/signal" + "github.com/docker/docker/pkg/term" + "github.com/docker/docker/registry" + "github.com/docker/engine-api/client" + "github.com/docker/engine-api/types" + registrytypes "github.com/docker/engine-api/types/registry" +) + +func (cli *DockerCli) electAuthServer() string { + // The daemon `/info` endpoint informs us of the default registry being + // used. This is essential in cross-platforms environment, where for + // example a Linux client might be interacting with a Windows daemon, hence + // the default registry URL might be Windows specific. + serverAddress := registry.IndexServer + if info, err := cli.client.Info(context.Background()); err != nil { + fmt.Fprintf(cli.out, "Warning: failed to get default registry endpoint from daemon (%v). Using system default: %s\n", err, serverAddress) + } else { + serverAddress = info.IndexServerAddress + } + return serverAddress +} + +// encodeAuthToBase64 serializes the auth configuration as JSON base64 payload +func encodeAuthToBase64(authConfig types.AuthConfig) (string, error) { + buf, err := json.Marshal(authConfig) + if err != nil { + return "", err + } + return base64.URLEncoding.EncodeToString(buf), nil +} + +func (cli *DockerCli) registryAuthenticationPrivilegedFunc(index *registrytypes.IndexInfo, cmdName string) client.RequestPrivilegeFunc { + return func() (string, error) { + fmt.Fprintf(cli.out, "\nPlease login prior to %s:\n", cmdName) + indexServer := registry.GetAuthConfigKey(index) + authConfig, err := cli.configureAuth("", "", indexServer, false) + if err != nil { + return "", err + } + return encodeAuthToBase64(authConfig) + } +} + +func (cli *DockerCli) resizeTty(id string, isExec bool) { + height, width := cli.getTtySize() + cli.resizeTtyTo(id, height, width, isExec) +} + +func (cli *DockerCli) resizeTtyTo(id string, height, width int, isExec bool) { + if height == 0 && width == 0 { + return + } + + options := types.ResizeOptions{ + ID: id, + Height: height, + Width: width, + } + + var err error + if isExec { + err = cli.client.ContainerExecResize(context.Background(), options) + } else { + err = cli.client.ContainerResize(context.Background(), options) + } + + if err != nil { + logrus.Debugf("Error resize: %s", err) + } +} + +// getExitCode perform an inspect on the container. It returns +// the running state and the exit code. +func getExitCode(cli *DockerCli, containerID string) (bool, int, error) { + c, err := cli.client.ContainerInspect(context.Background(), containerID) + if err != nil { + // If we can't connect, then the daemon probably died. + if err != client.ErrConnectionFailed { + return false, -1, err + } + return false, -1, nil + } + + return c.State.Running, c.State.ExitCode, nil +} + +// getExecExitCode perform an inspect on the exec command. It returns +// the running state and the exit code. +func getExecExitCode(cli *DockerCli, execID string) (bool, int, error) { + resp, err := cli.client.ContainerExecInspect(context.Background(), execID) + if err != nil { + // If we can't connect, then the daemon probably died. 
+ if err != client.ErrConnectionFailed { + return false, -1, err + } + return false, -1, nil + } + + return resp.Running, resp.ExitCode, nil +} + +func (cli *DockerCli) monitorTtySize(id string, isExec bool) error { + cli.resizeTty(id, isExec) + + if runtime.GOOS == "windows" { + go func() { + prevH, prevW := cli.getTtySize() + for { + time.Sleep(time.Millisecond * 250) + h, w := cli.getTtySize() + + if prevW != w || prevH != h { + cli.resizeTty(id, isExec) + } + prevH = h + prevW = w + } + }() + } else { + sigchan := make(chan os.Signal, 1) + gosignal.Notify(sigchan, signal.SIGWINCH) + go func() { + for range sigchan { + cli.resizeTty(id, isExec) + } + }() + } + return nil +} + +func (cli *DockerCli) getTtySize() (int, int) { + if !cli.isTerminalOut { + return 0, 0 + } + ws, err := term.GetWinsize(cli.outFd) + if err != nil { + logrus.Debugf("Error getting size: %s", err) + if ws == nil { + return 0, 0 + } + } + return int(ws.Height), int(ws.Width) +} + +func copyToFile(outfile string, r io.Reader) error { + tmpFile, err := ioutil.TempFile(filepath.Dir(outfile), ".docker_temp_") + if err != nil { + return err + } + + tmpPath := tmpFile.Name() + + _, err = io.Copy(tmpFile, r) + tmpFile.Close() + + if err != nil { + os.Remove(tmpPath) + return err + } + + if err = os.Rename(tmpPath, outfile); err != nil { + os.Remove(tmpPath) + return err + } + + return nil +} + +// resolveAuthConfig is like registry.ResolveAuthConfig, but if using the +// default index, it uses the default index name for the daemon's platform, +// not the client's platform. +func (cli *DockerCli) resolveAuthConfig(index *registrytypes.IndexInfo) types.AuthConfig { + configKey := index.Name + if index.Official { + configKey = cli.electAuthServer() + } + + a, _ := getCredentials(cli.configFile, configKey) + return a +} + +func (cli *DockerCli) retrieveAuthConfigs() map[string]types.AuthConfig { + acs, _ := getAllCredentials(cli.configFile) + return acs +} diff --git a/vendor/github.com/docker/docker/api/client/version.go b/vendor/github.com/docker/docker/api/client/version.go new file mode 100644 index 00000000..ebec4def --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/version.go @@ -0,0 +1,95 @@ +package client + +import ( + "runtime" + "text/template" + "time" + + "golang.org/x/net/context" + + Cli "github.com/docker/docker/cli" + "github.com/docker/docker/dockerversion" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/utils" + "github.com/docker/docker/utils/templates" + "github.com/docker/engine-api/types" +) + +var versionTemplate = `Client: + Version: {{.Client.Version}} + API version: {{.Client.APIVersion}} + Go version: {{.Client.GoVersion}} + Git commit: {{.Client.GitCommit}} + Built: {{.Client.BuildTime}} + OS/Arch: {{.Client.Os}}/{{.Client.Arch}}{{if .Client.Experimental}} + Experimental: {{.Client.Experimental}}{{end}}{{if .ServerOK}} + +Server: + Version: {{.Server.Version}} + API version: {{.Server.APIVersion}} + Go version: {{.Server.GoVersion}} + Git commit: {{.Server.GitCommit}} + Built: {{.Server.BuildTime}} + OS/Arch: {{.Server.Os}}/{{.Server.Arch}}{{if .Server.Experimental}} + Experimental: {{.Server.Experimental}}{{end}}{{end}}` + +// CmdVersion shows Docker version information. +// +// Available version information is shown for: client Docker version, client API version, client Go version, client Git commit, client OS/Arch, server Docker version, server API version, server Go version, server Git commit, and server OS/Arch. 
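+//
+// Because the output is rendered with text/template, a custom format can
+// select any field of the version data; a hypothetical invocation:
+//
+//	docker version --format '{{.Client.APIVersion}}'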
+// +// Usage: docker version +func (cli *DockerCli) CmdVersion(args ...string) (err error) { + cmd := Cli.Subcmd("version", nil, Cli.DockerCommands["version"].Description, true) + tmplStr := cmd.String([]string{"f", "#format", "-format"}, "", "Format the output using the given go template") + cmd.Require(flag.Exact, 0) + + cmd.ParseFlags(args, true) + + templateFormat := versionTemplate + if *tmplStr != "" { + templateFormat = *tmplStr + } + + var tmpl *template.Template + if tmpl, err = templates.Parse(templateFormat); err != nil { + return Cli.StatusError{StatusCode: 64, + Status: "Template parsing error: " + err.Error()} + } + + vd := types.VersionResponse{ + Client: &types.Version{ + Version: dockerversion.Version, + APIVersion: cli.client.ClientVersion(), + GoVersion: runtime.Version(), + GitCommit: dockerversion.GitCommit, + BuildTime: dockerversion.BuildTime, + Os: runtime.GOOS, + Arch: runtime.GOARCH, + Experimental: utils.ExperimentalBuild(), + }, + } + + serverVersion, err := cli.client.ServerVersion(context.Background()) + if err == nil { + vd.Server = &serverVersion + } + + // first we need to make BuildTime more human friendly + t, errTime := time.Parse(time.RFC3339Nano, vd.Client.BuildTime) + if errTime == nil { + vd.Client.BuildTime = t.Format(time.ANSIC) + } + + if vd.ServerOK() { + t, errTime = time.Parse(time.RFC3339Nano, vd.Server.BuildTime) + if errTime == nil { + vd.Server.BuildTime = t.Format(time.ANSIC) + } + } + + if err2 := tmpl.Execute(cli.out, vd); err2 != nil && err == nil { + err = err2 + } + cli.out.Write([]byte{'\n'}) + return err +} diff --git a/vendor/github.com/docker/docker/api/client/volume.go b/vendor/github.com/docker/docker/api/client/volume.go new file mode 100644 index 00000000..37e623fb --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/volume.go @@ -0,0 +1,177 @@ +package client + +import ( + "fmt" + "sort" + "text/tabwriter" + + "golang.org/x/net/context" + + Cli "github.com/docker/docker/cli" + "github.com/docker/docker/opts" + flag "github.com/docker/docker/pkg/mflag" + runconfigopts "github.com/docker/docker/runconfig/opts" + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/filters" +) + +// CmdVolume is the parent subcommand for all volume commands +// +// Usage: docker volume +func (cli *DockerCli) CmdVolume(args ...string) error { + description := Cli.DockerCommands["volume"].Description + "\n\nCommands:\n" + commands := [][]string{ + {"create", "Create a volume"}, + {"inspect", "Return low-level information on a volume"}, + {"ls", "List volumes"}, + {"rm", "Remove a volume"}, + } + + for _, cmd := range commands { + description += fmt.Sprintf(" %-25.25s%s\n", cmd[0], cmd[1]) + } + + description += "\nRun 'docker volume COMMAND --help' for more information on a command" + cmd := Cli.Subcmd("volume", []string{"[COMMAND]"}, description, false) + + cmd.Require(flag.Exact, 0) + err := cmd.ParseFlags(args, true) + cmd.Usage() + return err +} + +// CmdVolumeLs outputs a list of Docker volumes. +// +// Usage: docker volume ls [OPTIONS] +func (cli *DockerCli) CmdVolumeLs(args ...string) error { + cmd := Cli.Subcmd("volume ls", nil, "List volumes", true) + + quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only display volume names") + flFilter := opts.NewListOpts(nil) + cmd.Var(&flFilter, []string{"f", "-filter"}, "Provide filter values (i.e. 
'dangling=true')") + + cmd.Require(flag.Exact, 0) + cmd.ParseFlags(args, true) + + volFilterArgs := filters.NewArgs() + for _, f := range flFilter.GetAll() { + var err error + volFilterArgs, err = filters.ParseFlag(f, volFilterArgs) + if err != nil { + return err + } + } + + volumes, err := cli.client.VolumeList(context.Background(), volFilterArgs) + if err != nil { + return err + } + + w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0) + if !*quiet { + for _, warn := range volumes.Warnings { + fmt.Fprintln(cli.err, warn) + } + fmt.Fprintf(w, "DRIVER \tVOLUME NAME") + fmt.Fprintf(w, "\n") + } + + sort.Sort(byVolumeName(volumes.Volumes)) + for _, vol := range volumes.Volumes { + if *quiet { + fmt.Fprintln(w, vol.Name) + continue + } + fmt.Fprintf(w, "%s\t%s\n", vol.Driver, vol.Name) + } + w.Flush() + return nil +} + +type byVolumeName []*types.Volume + +func (r byVolumeName) Len() int { return len(r) } +func (r byVolumeName) Swap(i, j int) { r[i], r[j] = r[j], r[i] } +func (r byVolumeName) Less(i, j int) bool { + return r[i].Name < r[j].Name +} + +// CmdVolumeInspect displays low-level information on one or more volumes. +// +// Usage: docker volume inspect [OPTIONS] VOLUME [VOLUME...] +func (cli *DockerCli) CmdVolumeInspect(args ...string) error { + cmd := Cli.Subcmd("volume inspect", []string{"VOLUME [VOLUME...]"}, "Return low-level information on a volume", true) + tmplStr := cmd.String([]string{"f", "-format"}, "", "Format the output using the given go template") + + cmd.Require(flag.Min, 1) + cmd.ParseFlags(args, true) + + if err := cmd.Parse(args); err != nil { + return nil + } + + inspectSearcher := func(name string) (interface{}, []byte, error) { + i, err := cli.client.VolumeInspect(context.Background(), name) + return i, nil, err + } + + return cli.inspectElements(*tmplStr, cmd.Args(), inspectSearcher) +} + +// CmdVolumeCreate creates a new volume. +// +// Usage: docker volume create [OPTIONS] +func (cli *DockerCli) CmdVolumeCreate(args ...string) error { + cmd := Cli.Subcmd("volume create", nil, "Create a volume", true) + flDriver := cmd.String([]string{"d", "-driver"}, "local", "Specify volume driver name") + flName := cmd.String([]string{"-name"}, "", "Specify volume name") + + flDriverOpts := opts.NewMapOpts(nil, nil) + cmd.Var(flDriverOpts, []string{"o", "-opt"}, "Set driver specific options") + + flLabels := opts.NewListOpts(nil) + cmd.Var(&flLabels, []string{"-label"}, "Set metadata for a volume") + + cmd.Require(flag.Exact, 0) + cmd.ParseFlags(args, true) + + volReq := types.VolumeCreateRequest{ + Driver: *flDriver, + DriverOpts: flDriverOpts.GetAll(), + Name: *flName, + Labels: runconfigopts.ConvertKVStringsToMap(flLabels.GetAll()), + } + + vol, err := cli.client.VolumeCreate(context.Background(), volReq) + if err != nil { + return err + } + + fmt.Fprintf(cli.out, "%s\n", vol.Name) + return nil +} + +// CmdVolumeRm removes one or more volumes. +// +// Usage: docker volume rm VOLUME [VOLUME...] 
+func (cli *DockerCli) CmdVolumeRm(args ...string) error { + cmd := Cli.Subcmd("volume rm", []string{"VOLUME [VOLUME...]"}, "Remove a volume", true) + cmd.Require(flag.Min, 1) + cmd.ParseFlags(args, true) + + var status = 0 + + for _, name := range cmd.Args() { + if err := cli.client.VolumeRemove(context.Background(), name); err != nil { + fmt.Fprintf(cli.err, "%s\n", err) + status = 1 + continue + } + fmt.Fprintf(cli.out, "%s\n", name) + } + + if status != 0 { + return Cli.StatusError{StatusCode: status} + } + return nil +} diff --git a/vendor/github.com/docker/docker/api/client/wait.go b/vendor/github.com/docker/docker/api/client/wait.go new file mode 100644 index 00000000..609cd3be --- /dev/null +++ b/vendor/github.com/docker/docker/api/client/wait.go @@ -0,0 +1,37 @@ +package client + +import ( + "fmt" + "strings" + + "golang.org/x/net/context" + + Cli "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" +) + +// CmdWait blocks until a container stops, then prints its exit code. +// +// If more than one container is specified, this will wait synchronously on each container. +// +// Usage: docker wait CONTAINER [CONTAINER...] +func (cli *DockerCli) CmdWait(args ...string) error { + cmd := Cli.Subcmd("wait", []string{"CONTAINER [CONTAINER...]"}, Cli.DockerCommands["wait"].Description, true) + cmd.Require(flag.Min, 1) + + cmd.ParseFlags(args, true) + + var errs []string + for _, name := range cmd.Args() { + status, err := cli.client.ContainerWait(context.Background(), name) + if err != nil { + errs = append(errs, err.Error()) + } else { + fmt.Fprintf(cli.out, "%d\n", status) + } + } + if len(errs) > 0 { + return fmt.Errorf("%s", strings.Join(errs, "\n")) + } + return nil +} diff --git a/vendor/github.com/docker/docker/api/common.go b/vendor/github.com/docker/docker/api/common.go new file mode 100644 index 00000000..63560c6d --- /dev/null +++ b/vendor/github.com/docker/docker/api/common.go @@ -0,0 +1,146 @@ +package api + +import ( + "fmt" + "mime" + "path/filepath" + "sort" + "strconv" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/pkg/version" + "github.com/docker/engine-api/types" + "github.com/docker/libtrust" +) + +// Common constants for daemon and client. +const ( + // Version of Current REST API + DefaultVersion version.Version = "1.23" + + // MinVersion represents Minimum REST API version supported + MinVersion version.Version = "1.12" + + // NoBaseImageSpecifier is the symbol used by the FROM + // command to specify that no base image is to be used. + NoBaseImageSpecifier string = "scratch" +) + +// byPortInfo is a temporary type used to sort types.Port by its fields +type byPortInfo []types.Port + +func (r byPortInfo) Len() int { return len(r) } +func (r byPortInfo) Swap(i, j int) { r[i], r[j] = r[j], r[i] } +func (r byPortInfo) Less(i, j int) bool { + if r[i].PrivatePort != r[j].PrivatePort { + return r[i].PrivatePort < r[j].PrivatePort + } + + if r[i].IP != r[j].IP { + return r[i].IP < r[j].IP + } + + if r[i].PublicPort != r[j].PublicPort { + return r[i].PublicPort < r[j].PublicPort + } + + return r[i].Type < r[j].Type +} + +// DisplayablePorts returns formatted string representing open ports of container +// e.g. 
"0.0.0.0:80->9090/tcp, 9988/tcp" +// it's used by command 'docker ps' +func DisplayablePorts(ports []types.Port) string { + type portGroup struct { + first int + last int + } + groupMap := make(map[string]*portGroup) + var result []string + var hostMappings []string + var groupMapKeys []string + sort.Sort(byPortInfo(ports)) + for _, port := range ports { + current := port.PrivatePort + portKey := port.Type + if port.IP != "" { + if port.PublicPort != current { + hostMappings = append(hostMappings, fmt.Sprintf("%s:%d->%d/%s", port.IP, port.PublicPort, port.PrivatePort, port.Type)) + continue + } + portKey = fmt.Sprintf("%s/%s", port.IP, port.Type) + } + group := groupMap[portKey] + + if group == nil { + groupMap[portKey] = &portGroup{first: current, last: current} + // record order that groupMap keys are created + groupMapKeys = append(groupMapKeys, portKey) + continue + } + if current == (group.last + 1) { + group.last = current + continue + } + + result = append(result, formGroup(portKey, group.first, group.last)) + groupMap[portKey] = &portGroup{first: current, last: current} + } + for _, portKey := range groupMapKeys { + g := groupMap[portKey] + result = append(result, formGroup(portKey, g.first, g.last)) + } + result = append(result, hostMappings...) + return strings.Join(result, ", ") +} + +func formGroup(key string, start, last int) string { + parts := strings.Split(key, "/") + groupType := parts[0] + var ip string + if len(parts) > 1 { + ip = parts[0] + groupType = parts[1] + } + group := strconv.Itoa(start) + if start != last { + group = fmt.Sprintf("%s-%d", group, last) + } + if ip != "" { + group = fmt.Sprintf("%s:%s->%s", ip, group, group) + } + return fmt.Sprintf("%s/%s", group, groupType) +} + +// MatchesContentType validates the content type against the expected one +func MatchesContentType(contentType, expectedType string) bool { + mimetype, _, err := mime.ParseMediaType(contentType) + if err != nil { + logrus.Errorf("Error parsing media type: %s error: %v", contentType, err) + } + return err == nil && mimetype == expectedType +} + +// LoadOrCreateTrustKey attempts to load the libtrust key at the given path, +// otherwise generates a new one +func LoadOrCreateTrustKey(trustKeyPath string) (libtrust.PrivateKey, error) { + err := system.MkdirAll(filepath.Dir(trustKeyPath), 0700) + if err != nil { + return nil, err + } + trustKey, err := libtrust.LoadKeyFile(trustKeyPath) + if err == libtrust.ErrKeyFileDoesNotExist { + trustKey, err = libtrust.GenerateECP256PrivateKey() + if err != nil { + return nil, fmt.Errorf("Error generating key: %s", err) + } + if err := libtrust.SaveKey(trustKeyPath, trustKey); err != nil { + return nil, fmt.Errorf("Error saving key file: %s", err) + } + } else if err != nil { + return nil, fmt.Errorf("Error loading key file %s: %s", trustKeyPath, err) + } + return trustKey, nil +} diff --git a/vendor/github.com/docker/docker/api/server/httputils/errors.go b/vendor/github.com/docker/docker/api/server/httputils/errors.go new file mode 100644 index 00000000..cf8d2ae6 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/httputils/errors.go @@ -0,0 +1,70 @@ +package httputils + +import ( + "net/http" + "strings" + + "github.com/Sirupsen/logrus" +) + +// httpStatusError is an interface +// that errors with custom status codes +// implement to tell the api layer +// which response status to set. 
+type httpStatusError interface { + HTTPErrorStatusCode() int +} + +// inputValidationError is an interface +// that errors generated by invalid +// inputs can implement to tell the +// api layer to set a 400 status code +// in the response. +type inputValidationError interface { + IsValidationError() bool +} + +// WriteError decodes a specific docker error and sends it in the response. +func WriteError(w http.ResponseWriter, err error) { + if err == nil || w == nil { + logrus.WithFields(logrus.Fields{"error": err, "writer": w}).Error("unexpected HTTP error handling") + return + } + + var statusCode int + errMsg := err.Error() + + switch e := err.(type) { + case httpStatusError: + statusCode = e.HTTPErrorStatusCode() + case inputValidationError: + statusCode = http.StatusBadRequest + default: + // FIXME: this is brittle and should not be necessary, but we still need to identify if + // there are errors falling back into this logic. + // If we need to differentiate between different possible error types, + // we should create appropriate error types that implement the httpStatusError interface. + errStr := strings.ToLower(errMsg) + for keyword, status := range map[string]int{ + "not found": http.StatusNotFound, + "no such": http.StatusNotFound, + "bad parameter": http.StatusBadRequest, + "conflict": http.StatusConflict, + "impossible": http.StatusNotAcceptable, + "wrong login/password": http.StatusUnauthorized, + "unauthorized": http.StatusUnauthorized, + "hasn't been activated": http.StatusForbidden, + } { + if strings.Contains(errStr, keyword) { + statusCode = status + break + } + } + } + + if statusCode == 0 { + statusCode = http.StatusInternalServerError + } + + http.Error(w, errMsg, statusCode) +} diff --git a/vendor/github.com/docker/docker/api/server/httputils/form.go b/vendor/github.com/docker/docker/api/server/httputils/form.go new file mode 100644 index 00000000..20188c12 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/httputils/form.go @@ -0,0 +1,73 @@ +package httputils + +import ( + "fmt" + "net/http" + "path/filepath" + "strconv" + "strings" +) + +// BoolValue transforms a form value in different formats into a boolean type. +func BoolValue(r *http.Request, k string) bool { + s := strings.ToLower(strings.TrimSpace(r.FormValue(k))) + return !(s == "" || s == "0" || s == "no" || s == "false" || s == "none") +} + +// BoolValueOrDefault returns the default bool passed if the query param is +// missing, otherwise it's just a proxy to boolValue above +func BoolValueOrDefault(r *http.Request, k string, d bool) bool { + if _, ok := r.Form[k]; !ok { + return d + } + return BoolValue(r, k) +} + +// Int64ValueOrZero parses a form value into an int64 type. +// It returns 0 if the parsing fails. +func Int64ValueOrZero(r *http.Request, k string) int64 { + val, err := Int64ValueOrDefault(r, k, 0) + if err != nil { + return 0 + } + return val +} + +// Int64ValueOrDefault parses a form value into an int64 type. If there is an +// error, returns the error. If there is no value returns the default value. +func Int64ValueOrDefault(r *http.Request, field string, def int64) (int64, error) { + if r.Form.Get(field) != "" { + value, err := strconv.ParseInt(r.Form.Get(field), 10, 64) + if err != nil { + return value, err + } + return value, nil + } + return def, nil +} + +// ArchiveOptions stores archive information for different operations. +type ArchiveOptions struct { + Name string + Path string +} + +// ArchiveFormValues parses form values and turns them into ArchiveOptions. 
+// It fails if the archive name and path are not in the request. +func ArchiveFormValues(r *http.Request, vars map[string]string) (ArchiveOptions, error) { + if err := ParseForm(r); err != nil { + return ArchiveOptions{}, err + } + + name := vars["name"] + path := filepath.FromSlash(r.Form.Get("path")) + + switch { + case name == "": + return ArchiveOptions{}, fmt.Errorf("bad parameter: 'name' cannot be empty") + case path == "": + return ArchiveOptions{}, fmt.Errorf("bad parameter: 'path' cannot be empty") + } + + return ArchiveOptions{name, path}, nil +} diff --git a/vendor/github.com/docker/docker/api/server/httputils/httputils.go b/vendor/github.com/docker/docker/api/server/httputils/httputils.go new file mode 100644 index 00000000..59ee0308 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/httputils/httputils.go @@ -0,0 +1,107 @@ +package httputils + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + "strings" + + "golang.org/x/net/context" + + "github.com/docker/docker/api" + "github.com/docker/docker/pkg/version" +) + +// APIVersionKey is the client's requested API version. +const APIVersionKey = "api-version" + +// UAStringKey is used as key type for user-agent string in net/context struct +const UAStringKey = "upstream-user-agent" + +// APIFunc is an adapter to allow the use of ordinary functions as Docker API endpoints. +// Any function that has the appropriate signature can be registered as a API endpoint (e.g. getVersion). +type APIFunc func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error + +// HijackConnection interrupts the http response writer to get the +// underlying connection and operate with it. +func HijackConnection(w http.ResponseWriter) (io.ReadCloser, io.Writer, error) { + conn, _, err := w.(http.Hijacker).Hijack() + if err != nil { + return nil, nil, err + } + // Flush the options to make sure the client sets the raw mode + conn.Write([]byte{}) + return conn, conn, nil +} + +// CloseStreams ensures that a list for http streams are properly closed. +func CloseStreams(streams ...interface{}) { + for _, stream := range streams { + if tcpc, ok := stream.(interface { + CloseWrite() error + }); ok { + tcpc.CloseWrite() + } else if closer, ok := stream.(io.Closer); ok { + closer.Close() + } + } +} + +// CheckForJSON makes sure that the request's Content-Type is application/json. +func CheckForJSON(r *http.Request) error { + ct := r.Header.Get("Content-Type") + + // No Content-Type header is ok as long as there's no Body + if ct == "" { + if r.Body == nil || r.ContentLength == 0 { + return nil + } + } + + // Otherwise it better be json + if api.MatchesContentType(ct, "application/json") { + return nil + } + return fmt.Errorf("Content-Type specified (%s) must be 'application/json'", ct) +} + +// ParseForm ensures the request form is parsed even with invalid content types. +// If we don't do this, POST method without Content-type (even with empty body) will fail. +func ParseForm(r *http.Request) error { + if r == nil { + return nil + } + if err := r.ParseForm(); err != nil && !strings.HasPrefix(err.Error(), "mime:") { + return err + } + return nil +} + +// ParseMultipartForm ensures the request form is parsed, even with invalid content types. 
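+//
+// A typical handler sequence built on these helpers, assuming an
+// incoming *http.Request r (hypothetical usage):
+//
+//	if err := httputils.ParseForm(r); err != nil {
+//		return err
+//	}
+//	limit := httputils.Int64ValueOrZero(r, "limit")
+//	all := httputils.BoolValue(r, "all")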
+func ParseMultipartForm(r *http.Request) error { + if err := r.ParseMultipartForm(4096); err != nil && !strings.HasPrefix(err.Error(), "mime:") { + return err + } + return nil +} + +// WriteJSON writes the value v to the http response stream as json with standard json encoding. +func WriteJSON(w http.ResponseWriter, code int, v interface{}) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(code) + return json.NewEncoder(w).Encode(v) +} + +// VersionFromContext returns an API version from the context using APIVersionKey. +// It panics if the context value does not have version.Version type. +func VersionFromContext(ctx context.Context) (ver version.Version) { + if ctx == nil { + return + } + val := ctx.Value(APIVersionKey) + if val == nil { + return + } + return val.(version.Version) +} diff --git a/vendor/github.com/docker/docker/api/server/middleware.go b/vendor/github.com/docker/docker/api/server/middleware.go new file mode 100644 index 00000000..2622bf1b --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/middleware.go @@ -0,0 +1,41 @@ +package server + +import ( + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/server/middleware" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/pkg/authorization" +) + +// handleWithGlobalMiddlwares wraps the handler function for a request with +// the server's global middlewares. The order of the middlewares is backwards, +// meaning that the first in the list will be evaluated last. +func (s *Server) handleWithGlobalMiddlewares(handler httputils.APIFunc) httputils.APIFunc { + next := handler + + handleVersion := middleware.NewVersionMiddleware(dockerversion.Version, api.DefaultVersion, api.MinVersion) + next = handleVersion(next) + + if s.cfg.EnableCors { + handleCORS := middleware.NewCORSMiddleware(s.cfg.CorsHeaders) + next = handleCORS(next) + } + + handleUserAgent := middleware.NewUserAgentMiddleware(s.cfg.Version) + next = handleUserAgent(next) + + // Only want this on debug level + if s.cfg.Logging && logrus.GetLevel() == logrus.DebugLevel { + next = middleware.DebugRequestMiddleware(next) + } + + if len(s.cfg.AuthorizationPluginNames) > 0 { + s.authZPlugins = authorization.NewPlugins(s.cfg.AuthorizationPluginNames) + handleAuthorization := middleware.NewAuthorizationMiddleware(s.authZPlugins) + next = handleAuthorization(next) + } + + return next +} diff --git a/vendor/github.com/docker/docker/api/server/middleware/authorization.go b/vendor/github.com/docker/docker/api/server/middleware/authorization.go new file mode 100644 index 00000000..cbfa99e7 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/middleware/authorization.go @@ -0,0 +1,42 @@ +package middleware + +import ( + "net/http" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/pkg/authorization" + "golang.org/x/net/context" +) + +// NewAuthorizationMiddleware creates a new Authorization middleware. 
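+//
+// Composition is inside-out: the handler is wrapped so that the last
+// middleware applied is the first to run on a request. A reduced sketch,
+// with mwA and mwB standing in for any two middlewares (hypothetical
+// names):
+//
+//	next := handler
+//	next = mwA(next) // runs second
+//	next = mwB(next) // runs first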
+func NewAuthorizationMiddleware(plugins []authorization.Plugin) Middleware { + return func(handler httputils.APIFunc) httputils.APIFunc { + return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + // FIXME: fill when authN gets in + // User and UserAuthNMethod are taken from AuthN plugins + // Currently tracked in https://github.com/docker/docker/pull/13994 + user := "" + userAuthNMethod := "" + authCtx := authorization.NewCtx(plugins, user, userAuthNMethod, r.Method, r.RequestURI) + + if err := authCtx.AuthZRequest(w, r); err != nil { + logrus.Errorf("AuthZRequest for %s %s returned error: %s", r.Method, r.RequestURI, err) + return err + } + + rw := authorization.NewResponseModifier(w) + + if err := handler(ctx, rw, r, vars); err != nil { + logrus.Errorf("Handler for %s %s returned error: %s", r.Method, r.RequestURI, err) + return err + } + + if err := authCtx.AuthZResponse(rw, r); err != nil { + logrus.Errorf("AuthZResponse for %s %s returned error: %s", r.Method, r.RequestURI, err) + return err + } + return nil + } + } +} diff --git a/vendor/github.com/docker/docker/api/server/middleware/cors.go b/vendor/github.com/docker/docker/api/server/middleware/cors.go new file mode 100644 index 00000000..de21897d --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/middleware/cors.go @@ -0,0 +1,33 @@ +package middleware + +import ( + "net/http" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/server/httputils" + "golang.org/x/net/context" +) + +// NewCORSMiddleware creates a new CORS middleware. +func NewCORSMiddleware(defaultHeaders string) Middleware { + return func(handler httputils.APIFunc) httputils.APIFunc { + return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + // If "api-cors-header" is not given, but "api-enable-cors" is true, we set cors to "*" + // otherwise, all head values will be passed to HTTP handler + corsHeaders := defaultHeaders + if corsHeaders == "" { + corsHeaders = "*" + } + + writeCorsHeaders(w, r, corsHeaders) + return handler(ctx, w, r, vars) + } + } +} + +func writeCorsHeaders(w http.ResponseWriter, r *http.Request, corsHeaders string) { + logrus.Debugf("CORS header is enabled and set to: %s", corsHeaders) + w.Header().Add("Access-Control-Allow-Origin", corsHeaders) + w.Header().Add("Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type, Accept, X-Registry-Auth") + w.Header().Add("Access-Control-Allow-Methods", "HEAD, GET, POST, DELETE, PUT, OPTIONS") +} diff --git a/vendor/github.com/docker/docker/api/server/middleware/debug.go b/vendor/github.com/docker/docker/api/server/middleware/debug.go new file mode 100644 index 00000000..be7056f6 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/middleware/debug.go @@ -0,0 +1,56 @@ +package middleware + +import ( + "bufio" + "encoding/json" + "io" + "net/http" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/pkg/ioutils" + "golang.org/x/net/context" +) + +// DebugRequestMiddleware dumps the request to logger +func DebugRequestMiddleware(handler httputils.APIFunc) httputils.APIFunc { + return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + logrus.Debugf("Calling %s %s", r.Method, r.RequestURI) + + if r.Method != "POST" { + return handler(ctx, w, r, vars) + } + if err := httputils.CheckForJSON(r); err != nil { + return handler(ctx, w, r, vars) + } + maxBodySize 
:= 4096 // 4KB + if r.ContentLength > int64(maxBodySize) { + return handler(ctx, w, r, vars) + } + + body := r.Body + bufReader := bufio.NewReaderSize(body, maxBodySize) + r.Body = ioutils.NewReadCloserWrapper(bufReader, func() error { return body.Close() }) + + b, err := bufReader.Peek(maxBodySize) + if err != io.EOF { + // either there was an error reading, or the buffer is full (in which case the request is too large) + return handler(ctx, w, r, vars) + } + + var postForm map[string]interface{} + if err := json.Unmarshal(b, &postForm); err == nil { + if _, exists := postForm["password"]; exists { + postForm["password"] = "*****" + } + formStr, errMarshal := json.Marshal(postForm) + if errMarshal == nil { + logrus.Debugf("form data: %s", string(formStr)) + } else { + logrus.Debugf("form data: %q", postForm) + } + } + + return handler(ctx, w, r, vars) + } +} diff --git a/vendor/github.com/docker/docker/api/server/middleware/middleware.go b/vendor/github.com/docker/docker/api/server/middleware/middleware.go new file mode 100644 index 00000000..588331ae --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/middleware/middleware.go @@ -0,0 +1,7 @@ +package middleware + +import "github.com/docker/docker/api/server/httputils" + +// Middleware is an adapter to allow the use of ordinary functions as Docker API filters. +// Any function that has the appropriate signature can be registered as a middleware. +type Middleware func(handler httputils.APIFunc) httputils.APIFunc diff --git a/vendor/github.com/docker/docker/api/server/middleware/user_agent.go b/vendor/github.com/docker/docker/api/server/middleware/user_agent.go new file mode 100644 index 00000000..188196bf --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/middleware/user_agent.go @@ -0,0 +1,37 @@ +package middleware + +import ( + "net/http" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/pkg/version" + "golang.org/x/net/context" +) + +// NewUserAgentMiddleware creates a new UserAgent middleware. 
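+//
+// Any function with the Middleware shape can join the chain; a minimal
+// hypothetical request-logging middleware:
+//
+//	func logging(handler httputils.APIFunc) httputils.APIFunc {
+//		return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+//			logrus.Debugf("%s %s", r.Method, r.RequestURI)
+//			return handler(ctx, w, r, vars)
+//		}
+//	}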
+func NewUserAgentMiddleware(versionCheck string) Middleware { + serverVersion := version.Version(versionCheck) + + return func(handler httputils.APIFunc) httputils.APIFunc { + return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + ctx = context.WithValue(ctx, httputils.UAStringKey, r.Header.Get("User-Agent")) + + if strings.Contains(r.Header.Get("User-Agent"), "Docker-Client/") { + userAgent := strings.Split(r.Header.Get("User-Agent"), "/") + + // v1.20 onwards includes the GOOS of the client after the version + // such as Docker/1.7.0 (linux) + if len(userAgent) == 2 && strings.Contains(userAgent[1], " ") { + userAgent[1] = strings.Split(userAgent[1], " ")[0] + } + + if len(userAgent) == 2 && !serverVersion.Equal(version.Version(userAgent[1])) { + logrus.Debugf("Client and server don't have the same version (client: %s, server: %s)", userAgent[1], serverVersion) + } + } + return handler(ctx, w, r, vars) + } + } +} diff --git a/vendor/github.com/docker/docker/api/server/middleware/version.go b/vendor/github.com/docker/docker/api/server/middleware/version.go new file mode 100644 index 00000000..41d518bc --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/middleware/version.go @@ -0,0 +1,45 @@ +package middleware + +import ( + "fmt" + "net/http" + "runtime" + + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/pkg/version" + "golang.org/x/net/context" +) + +type badRequestError struct { + error +} + +func (badRequestError) HTTPErrorStatusCode() int { + return http.StatusBadRequest +} + +// NewVersionMiddleware creates a new Version middleware. +func NewVersionMiddleware(versionCheck string, defaultVersion, minVersion version.Version) Middleware { + serverVersion := version.Version(versionCheck) + + return func(handler httputils.APIFunc) httputils.APIFunc { + return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + apiVersion := version.Version(vars["version"]) + if apiVersion == "" { + apiVersion = defaultVersion + } + + if apiVersion.GreaterThan(defaultVersion) { + return badRequestError{fmt.Errorf("client is newer than server (client API version: %s, server API version: %s)", apiVersion, defaultVersion)} + } + if apiVersion.LessThan(minVersion) { + return badRequestError{fmt.Errorf("client version %s is too old. 
Minimum supported API version is %s, please upgrade your client to a newer version", apiVersion, minVersion)} + } + + header := fmt.Sprintf("Docker/%s (%s)", serverVersion, runtime.GOOS) + w.Header().Set("Server", header) + ctx = context.WithValue(ctx, httputils.APIVersionKey, apiVersion) + return handler(ctx, w, r, vars) + } + } +} diff --git a/vendor/github.com/docker/docker/api/server/profiler.go b/vendor/github.com/docker/docker/api/server/profiler.go new file mode 100644 index 00000000..3c0dfd08 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/profiler.go @@ -0,0 +1,40 @@ +package server + +import ( + "expvar" + "fmt" + "net/http" + "net/http/pprof" + + "github.com/gorilla/mux" +) + +const debugPathPrefix = "/debug/" + +func profilerSetup(mainRouter *mux.Router) { + var r = mainRouter.PathPrefix(debugPathPrefix).Subrouter() + r.HandleFunc("/vars", expVars) + r.HandleFunc("/pprof/", pprof.Index) + r.HandleFunc("/pprof/cmdline", pprof.Cmdline) + r.HandleFunc("/pprof/profile", pprof.Profile) + r.HandleFunc("/pprof/symbol", pprof.Symbol) + r.HandleFunc("/pprof/block", pprof.Handler("block").ServeHTTP) + r.HandleFunc("/pprof/heap", pprof.Handler("heap").ServeHTTP) + r.HandleFunc("/pprof/goroutine", pprof.Handler("goroutine").ServeHTTP) + r.HandleFunc("/pprof/threadcreate", pprof.Handler("threadcreate").ServeHTTP) +} + +// Replicated from expvar.go as not public. +func expVars(w http.ResponseWriter, r *http.Request) { + first := true + w.Header().Set("Content-Type", "application/json; charset=utf-8") + fmt.Fprintf(w, "{\n") + expvar.Do(func(kv expvar.KeyValue) { + if !first { + fmt.Fprintf(w, ",\n") + } + first = false + fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value) + }) + fmt.Fprintf(w, "\n}\n") +} diff --git a/vendor/github.com/docker/docker/api/server/router/build/backend.go b/vendor/github.com/docker/docker/api/server/router/build/backend.go new file mode 100644 index 00000000..839f3160 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/build/backend.go @@ -0,0 +1,20 @@ +package build + +import ( + "io" + + "github.com/docker/docker/builder" + "github.com/docker/engine-api/types" + "golang.org/x/net/context" +) + +// Backend abstracts an image builder whose only purpose is to build an image referenced by an imageID. +type Backend interface { + // Build builds a Docker image referenced by an imageID string. + // + // Note: Tagging an image should not be done by a Builder, it should instead be done + // by the caller. 
+ // + // TODO: make this return a reference instead of string + Build(clientCtx context.Context, config *types.ImageBuildOptions, context builder.Context, stdout io.Writer, stderr io.Writer, out io.Writer, clientGone <-chan bool) (string, error) +} diff --git a/vendor/github.com/docker/docker/api/server/router/build/build.go b/vendor/github.com/docker/docker/api/server/router/build/build.go new file mode 100644 index 00000000..dc85d1df --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/build/build.go @@ -0,0 +1,29 @@ +package build + +import "github.com/docker/docker/api/server/router" + +// buildRouter is a router to talk with the build controller +type buildRouter struct { + backend Backend + routes []router.Route +} + +// NewRouter initializes a new build router +func NewRouter(b Backend) router.Router { + r := &buildRouter{ + backend: b, + } + r.initRoutes() + return r +} + +// Routes returns the available routers to the build controller +func (r *buildRouter) Routes() []router.Route { + return r.routes +} + +func (r *buildRouter) initRoutes() { + r.routes = []router.Route{ + router.NewPostRoute("/build", r.postBuild), + } +} diff --git a/vendor/github.com/docker/docker/api/server/router/build/build_routes.go b/vendor/github.com/docker/docker/api/server/router/build/build_routes.go new file mode 100644 index 00000000..a6b787d5 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/build/build_routes.go @@ -0,0 +1,213 @@ +package build + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "net/http" + "strconv" + "strings" + "sync" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/builder" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/streamformatter" + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/container" + "github.com/docker/go-units" + "golang.org/x/net/context" +) + +func newImageBuildOptions(ctx context.Context, r *http.Request) (*types.ImageBuildOptions, error) { + version := httputils.VersionFromContext(ctx) + options := &types.ImageBuildOptions{} + if httputils.BoolValue(r, "forcerm") && version.GreaterThanOrEqualTo("1.12") { + options.Remove = true + } else if r.FormValue("rm") == "" && version.GreaterThanOrEqualTo("1.12") { + options.Remove = true + } else { + options.Remove = httputils.BoolValue(r, "rm") + } + if httputils.BoolValue(r, "pull") && version.GreaterThanOrEqualTo("1.16") { + options.PullParent = true + } + + options.Dockerfile = r.FormValue("dockerfile") + options.SuppressOutput = httputils.BoolValue(r, "q") + options.NoCache = httputils.BoolValue(r, "nocache") + options.ForceRemove = httputils.BoolValue(r, "forcerm") + options.MemorySwap = httputils.Int64ValueOrZero(r, "memswap") + options.Memory = httputils.Int64ValueOrZero(r, "memory") + options.CPUShares = httputils.Int64ValueOrZero(r, "cpushares") + options.CPUPeriod = httputils.Int64ValueOrZero(r, "cpuperiod") + options.CPUQuota = httputils.Int64ValueOrZero(r, "cpuquota") + options.CPUSetCPUs = r.FormValue("cpusetcpus") + options.CPUSetMems = r.FormValue("cpusetmems") + options.CgroupParent = r.FormValue("cgroupparent") + options.Tags = r.Form["t"] + + if r.Form.Get("shmsize") != "" { + shmSize, err := strconv.ParseInt(r.Form.Get("shmsize"), 10, 64) + if err != nil { + return nil, err + } + options.ShmSize = shmSize + } + + if i := container.Isolation(r.FormValue("isolation")); i != "" { + if 
!container.Isolation.IsValid(i) { + return nil, fmt.Errorf("Unsupported isolation: %q", i) + } + options.Isolation = i + } + + var buildUlimits = []*units.Ulimit{} + ulimitsJSON := r.FormValue("ulimits") + if ulimitsJSON != "" { + if err := json.NewDecoder(strings.NewReader(ulimitsJSON)).Decode(&buildUlimits); err != nil { + return nil, err + } + options.Ulimits = buildUlimits + } + + var buildArgs = map[string]string{} + buildArgsJSON := r.FormValue("buildargs") + if buildArgsJSON != "" { + if err := json.NewDecoder(strings.NewReader(buildArgsJSON)).Decode(&buildArgs); err != nil { + return nil, err + } + options.BuildArgs = buildArgs + } + var labels = map[string]string{} + labelsJSON := r.FormValue("labels") + if labelsJSON != "" { + if err := json.NewDecoder(strings.NewReader(labelsJSON)).Decode(&labels); err != nil { + return nil, err + } + options.Labels = labels + } + + return options, nil +} + +type syncWriter struct { + w io.Writer + mu sync.Mutex +} + +func (s *syncWriter) Write(b []byte) (count int, err error) { + s.mu.Lock() + count, err = s.w.Write(b) + s.mu.Unlock() + return +} + +func (br *buildRouter) postBuild(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var ( + authConfigs = map[string]types.AuthConfig{} + authConfigsEncoded = r.Header.Get("X-Registry-Config") + notVerboseBuffer = bytes.NewBuffer(nil) + ) + + if authConfigsEncoded != "" { + authConfigsJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authConfigsEncoded)) + if err := json.NewDecoder(authConfigsJSON).Decode(&authConfigs); err != nil { + // for a pull it is not an error if no auth was given + // to increase compatibility with the existing api it is defaulting + // to be empty. + } + } + + w.Header().Set("Content-Type", "application/json") + + output := ioutils.NewWriteFlusher(w) + defer output.Close() + sf := streamformatter.NewJSONStreamFormatter() + errf := func(err error) error { + if httputils.BoolValue(r, "q") && notVerboseBuffer.Len() > 0 { + output.Write(notVerboseBuffer.Bytes()) + } + // Do not write the error in the http output if it's still empty. + // This prevents from writing a 200(OK) when there is an internal error. + if !output.Flushed() { + return err + } + _, err = w.Write(sf.FormatError(err)) + if err != nil { + logrus.Warnf("could not write error response: %v", err) + } + return nil + } + + buildOptions, err := newImageBuildOptions(ctx, r) + if err != nil { + return errf(err) + } + + remoteURL := r.FormValue("remote") + + // Currently, only used if context is from a remote url. + // Look at code in DetectContextFromRemoteURL for more information. 
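+	// When the build was started with -q, the progress stream is diverted
+	// into notVerboseBuffer so that stdout ultimately carries only the
+	// image ID.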
+ createProgressReader := func(in io.ReadCloser) io.ReadCloser { + progressOutput := sf.NewProgressOutput(output, true) + if buildOptions.SuppressOutput { + progressOutput = sf.NewProgressOutput(notVerboseBuffer, true) + } + return progress.NewProgressReader(in, progressOutput, r.ContentLength, "Downloading context", remoteURL) + } + + var ( + context builder.ModifiableContext + dockerfileName string + out io.Writer + ) + context, dockerfileName, err = builder.DetectContextFromRemoteURL(r.Body, remoteURL, createProgressReader) + if err != nil { + return errf(err) + } + defer func() { + if err := context.Close(); err != nil { + logrus.Debugf("[BUILDER] failed to remove temporary context: %v", err) + } + }() + if len(dockerfileName) > 0 { + buildOptions.Dockerfile = dockerfileName + } + + buildOptions.AuthConfigs = authConfigs + + out = output + if buildOptions.SuppressOutput { + out = notVerboseBuffer + } + out = &syncWriter{w: out} + stdout := &streamformatter.StdoutFormatter{Writer: out, StreamFormatter: sf} + stderr := &streamformatter.StderrFormatter{Writer: out, StreamFormatter: sf} + + closeNotifier := make(<-chan bool) + if notifier, ok := w.(http.CloseNotifier); ok { + closeNotifier = notifier.CloseNotify() + } + + imgID, err := br.backend.Build(ctx, buildOptions, + builder.DockerIgnoreContext{ModifiableContext: context}, + stdout, stderr, out, + closeNotifier) + if err != nil { + return errf(err) + } + + // Everything worked so if -q was provided the output from the daemon + // should be just the image ID and we'll print that to stdout. + if buildOptions.SuppressOutput { + stdout := &streamformatter.StdoutFormatter{Writer: output, StreamFormatter: sf} + fmt.Fprintf(stdout, "%s\n", string(imgID)) + } + + return nil +} diff --git a/vendor/github.com/docker/docker/api/server/router/container/backend.go b/vendor/github.com/docker/docker/api/server/router/container/backend.go new file mode 100644 index 00000000..bd891975 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/container/backend.go @@ -0,0 +1,71 @@ +package container + +import ( + "io" + "time" + + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/version" + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/container" +) + +// execBackend includes functions to implement to provide exec functionality. +type execBackend interface { + ContainerExecCreate(config *types.ExecConfig) (string, error) + ContainerExecInspect(id string) (*backend.ExecInspect, error) + ContainerExecResize(name string, height, width int) error + ContainerExecStart(name string, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) error + ExecExists(name string) (bool, error) +} + +// copyBackend includes functions to implement to provide container copy functionality. +type copyBackend interface { + ContainerArchivePath(name string, path string) (content io.ReadCloser, stat *types.ContainerPathStat, err error) + ContainerCopy(name string, res string) (io.ReadCloser, error) + ContainerExport(name string, out io.Writer) error + ContainerExtractToDir(name, path string, noOverwriteDirNonDir bool, content io.Reader) error + ContainerStatPath(name string, path string) (stat *types.ContainerPathStat, err error) +} + +// stateBackend includes functions to implement to provide container state lifecycle functionality. 
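+//
+// The narrow interfaces in this file are embedded into the aggregate
+// Backend below, so one daemon object satisfies the router by providing
+// each method group; a reduced sketch of the embedding pattern
+// (hypothetical names):
+//
+//	type reader interface{ Get(id string) error }
+//	type writer interface{ Put(id string) error }
+//	type store interface {
+//		reader
+//		writer
+//	}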
+type stateBackend interface { + ContainerCreate(types.ContainerCreateConfig) (types.ContainerCreateResponse, error) + ContainerKill(name string, sig uint64) error + ContainerPause(name string) error + ContainerRename(oldName, newName string) error + ContainerResize(name string, height, width int) error + ContainerRestart(name string, seconds int) error + ContainerRm(name string, config *types.ContainerRmConfig) error + ContainerStart(name string, hostConfig *container.HostConfig) error + ContainerStop(name string, seconds int) error + ContainerUnpause(name string) error + ContainerUpdate(name string, hostConfig *container.HostConfig) ([]string, error) + ContainerWait(name string, timeout time.Duration) (int, error) +} + +// monitorBackend includes functions to implement to provide containers monitoring functionality. +type monitorBackend interface { + ContainerChanges(name string) ([]archive.Change, error) + ContainerInspect(name string, size bool, version version.Version) (interface{}, error) + ContainerLogs(name string, config *backend.ContainerLogsConfig, started chan struct{}) error + ContainerStats(name string, config *backend.ContainerStatsConfig) error + ContainerTop(name string, psArgs string) (*types.ContainerProcessList, error) + + Containers(config *types.ContainerListOptions) ([]*types.Container, error) +} + +// attachBackend includes function to implement to provide container attaching functionality. +type attachBackend interface { + ContainerAttach(name string, c *backend.ContainerAttachConfig) error +} + +// Backend is all the methods that need to be implemented to provide container specific functionality. +type Backend interface { + execBackend + copyBackend + stateBackend + monitorBackend + attachBackend +} diff --git a/vendor/github.com/docker/docker/api/server/router/container/container.go b/vendor/github.com/docker/docker/api/server/router/container/container.go new file mode 100644 index 00000000..873f13d2 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/container/container.go @@ -0,0 +1,63 @@ +package container + +import "github.com/docker/docker/api/server/router" + +// containerRouter is a router to talk with the container controller +type containerRouter struct { + backend Backend + routes []router.Route +} + +// NewRouter initializes a new container router +func NewRouter(b Backend) router.Router { + r := &containerRouter{ + backend: b, + } + r.initRoutes() + return r +} + +// Routes returns the available routes to the container controller +func (r *containerRouter) Routes() []router.Route { + return r.routes +} + +// initRoutes initializes the routes in container router +func (r *containerRouter) initRoutes() { + r.routes = []router.Route{ + // HEAD + router.NewHeadRoute("/containers/{name:.*}/archive", r.headContainersArchive), + // GET + router.NewGetRoute("/containers/json", r.getContainersJSON), + router.NewGetRoute("/containers/{name:.*}/export", r.getContainersExport), + router.NewGetRoute("/containers/{name:.*}/changes", r.getContainersChanges), + router.NewGetRoute("/containers/{name:.*}/json", r.getContainersByName), + router.NewGetRoute("/containers/{name:.*}/top", r.getContainersTop), + router.NewGetRoute("/containers/{name:.*}/logs", r.getContainersLogs), + router.NewGetRoute("/containers/{name:.*}/stats", r.getContainersStats), + router.NewGetRoute("/containers/{name:.*}/attach/ws", r.wsContainersAttach), + router.NewGetRoute("/exec/{id:.*}/json", r.getExecByID), + router.NewGetRoute("/containers/{name:.*}/archive", 
r.getContainersArchive), + // POST + router.NewPostRoute("/containers/create", r.postContainersCreate), + router.NewPostRoute("/containers/{name:.*}/kill", r.postContainersKill), + router.NewPostRoute("/containers/{name:.*}/pause", r.postContainersPause), + router.NewPostRoute("/containers/{name:.*}/unpause", r.postContainersUnpause), + router.NewPostRoute("/containers/{name:.*}/restart", r.postContainersRestart), + router.NewPostRoute("/containers/{name:.*}/start", r.postContainersStart), + router.NewPostRoute("/containers/{name:.*}/stop", r.postContainersStop), + router.NewPostRoute("/containers/{name:.*}/wait", r.postContainersWait), + router.NewPostRoute("/containers/{name:.*}/resize", r.postContainersResize), + router.NewPostRoute("/containers/{name:.*}/attach", r.postContainersAttach), + router.NewPostRoute("/containers/{name:.*}/copy", r.postContainersCopy), + router.NewPostRoute("/containers/{name:.*}/exec", r.postContainerExecCreate), + router.NewPostRoute("/exec/{name:.*}/start", r.postContainerExecStart), + router.NewPostRoute("/exec/{name:.*}/resize", r.postContainerExecResize), + router.NewPostRoute("/containers/{name:.*}/rename", r.postContainerRename), + router.NewPostRoute("/containers/{name:.*}/update", r.postContainerUpdate), + // PUT + router.NewPutRoute("/containers/{name:.*}/archive", r.putContainersArchive), + // DELETE + router.NewDeleteRoute("/containers/{name:.*}", r.deleteContainers), + } +} diff --git a/vendor/github.com/docker/docker/api/server/router/container/container_routes.go b/vendor/github.com/docker/docker/api/server/router/container/container_routes.go new file mode 100644 index 00000000..016e00f0 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/container/container_routes.go @@ -0,0 +1,531 @@ +package container + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + "strconv" + "strings" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/signal" + "github.com/docker/docker/pkg/term" + "github.com/docker/docker/runconfig" + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/container" + "github.com/docker/engine-api/types/filters" + "golang.org/x/net/context" + "golang.org/x/net/websocket" +) + +func (s *containerRouter) getContainersJSON(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + filter, err := filters.FromParam(r.Form.Get("filters")) + if err != nil { + return err + } + + config := &types.ContainerListOptions{ + All: httputils.BoolValue(r, "all"), + Size: httputils.BoolValue(r, "size"), + Since: r.Form.Get("since"), + Before: r.Form.Get("before"), + Filter: filter, + } + + if tmpLimit := r.Form.Get("limit"); tmpLimit != "" { + limit, err := strconv.Atoi(tmpLimit) + if err != nil { + return err + } + config.Limit = limit + } + + containers, err := s.backend.Containers(config) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, containers) +} + +func (s *containerRouter) getContainersStats(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + stream := httputils.BoolValueOrDefault(r, "stream", true) + if !stream { + w.Header().Set("Content-Type", "application/json") + } + + var 
closeNotifier <-chan bool
+	if notifier, ok := w.(http.CloseNotifier); ok {
+		closeNotifier = notifier.CloseNotify()
+	}
+
+	config := &backend.ContainerStatsConfig{
+		Stream:    stream,
+		OutStream: w,
+		Stop:      closeNotifier,
+		Version:   string(httputils.VersionFromContext(ctx)),
+	}
+
+	return s.backend.ContainerStats(vars["name"], config)
+}
+
+func (s *containerRouter) getContainersLogs(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+	if err := httputils.ParseForm(r); err != nil {
+		return err
+	}
+
+	// Args are validated before the stream starts because when it starts we're
+	// sending HTTP 200 by writing an empty chunk of data to tell the client that
+	// the daemon is going to stream. Once that initial HTTP 200 is sent we can't
+	// report any error that occurs after the stream starts (e.g. container not
+	// found, wrong parameters) with the appropriate status code.
+	stdout, stderr := httputils.BoolValue(r, "stdout"), httputils.BoolValue(r, "stderr")
+	if !(stdout || stderr) {
+		return fmt.Errorf("Bad parameters: you must choose at least one stream")
+	}
+
+	var closeNotifier <-chan bool
+	if notifier, ok := w.(http.CloseNotifier); ok {
+		closeNotifier = notifier.CloseNotify()
+	}
+
+	containerName := vars["name"]
+	logsConfig := &backend.ContainerLogsConfig{
+		ContainerLogsOptions: types.ContainerLogsOptions{
+			Follow:     httputils.BoolValue(r, "follow"),
+			Timestamps: httputils.BoolValue(r, "timestamps"),
+			Since:      r.Form.Get("since"),
+			Tail:       r.Form.Get("tail"),
+			ShowStdout: stdout,
+			ShowStderr: stderr,
+		},
+		OutStream: w,
+		Stop:      closeNotifier,
+	}
+
+	chStarted := make(chan struct{})
+	if err := s.backend.ContainerLogs(containerName, logsConfig, chStarted); err != nil {
+		select {
+		case <-chStarted:
+			// The client may be expecting all of the data we're sending to
+			// be multiplexed, so send it through OutStream, which will
+			// have been set up to handle that if needed.
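+			// (A client would typically demultiplex such a stream with
+			// pkg/stdcopy, e.g.
+			//
+			//	stdcopy.StdCopy(os.Stdout, os.Stderr, resp)
+			//
+			// where resp is the hypothetical response body being read;
+			// sketch only, error handling elided.)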
+			fmt.Fprintf(logsConfig.OutStream, "Error running logs job: %v\n", err)
+		default:
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (s *containerRouter) getContainersExport(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+	return s.backend.ContainerExport(vars["name"], w)
+}
+
+func (s *containerRouter) postContainersStart(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+	// If contentLength is -1, we can assume chunked encoding
+	// or, more technically, that the length is unknown
+	// https://golang.org/src/pkg/net/http/request.go#L139
+	// net/http otherwise seems to swallow any headers related to chunked encoding
+	// including r.TransferEncoding
+	// allow a nil body for backwards compatibility
+	var hostConfig *container.HostConfig
+	if r.Body != nil && (r.ContentLength > 0 || r.ContentLength == -1) {
+		if err := httputils.CheckForJSON(r); err != nil {
+			return err
+		}
+
+		c, err := runconfig.DecodeHostConfig(r.Body)
+		if err != nil {
+			return err
+		}
+
+		hostConfig = c
+	}
+
+	if err := s.backend.ContainerStart(vars["name"], hostConfig); err != nil {
+		return err
+	}
+	w.WriteHeader(http.StatusNoContent)
+	return nil
+}
+
+func (s *containerRouter) postContainersStop(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+	if err := httputils.ParseForm(r); err != nil {
+		return err
+	}
+
+	seconds, _ := strconv.Atoi(r.Form.Get("t"))
+
+	if err := s.backend.ContainerStop(vars["name"], seconds); err != nil {
+		return err
+	}
+	w.WriteHeader(http.StatusNoContent)
+
+	return nil
+}
+
+type errContainerIsRunning interface {
+	ContainerIsRunning() bool
+}
+
+func (s *containerRouter) postContainersKill(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+	if err := httputils.ParseForm(r); err != nil {
+		return err
+	}
+
+	var sig syscall.Signal
+	name := vars["name"]
+
+	// If we have a signal, look at it. Otherwise, do nothing.
+	if sigStr := r.Form.Get("signal"); sigStr != "" {
+		var err error
+		if sig, err = signal.ParseSignal(sigStr); err != nil {
+			return err
+		}
+	}
+
+	if err := s.backend.ContainerKill(name, uint64(sig)); err != nil {
+		var isStopped bool
+		if e, ok := err.(errContainerIsRunning); ok {
+			isStopped = !e.ContainerIsRunning()
+		}
+
+		// Return any error that is not caused by the container being stopped.
+		// For API versions >= 1.20, return the error even if the container is
+		// not running, to keep backwards compatibility.
+ version := httputils.VersionFromContext(ctx) + if version.GreaterThanOrEqualTo("1.20") || !isStopped { + return fmt.Errorf("Cannot kill container %s: %v", name, err) + } + } + + w.WriteHeader(http.StatusNoContent) + return nil +} + +func (s *containerRouter) postContainersRestart(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + timeout, _ := strconv.Atoi(r.Form.Get("t")) + + if err := s.backend.ContainerRestart(vars["name"], timeout); err != nil { + return err + } + + w.WriteHeader(http.StatusNoContent) + + return nil +} + +func (s *containerRouter) postContainersPause(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + if err := s.backend.ContainerPause(vars["name"]); err != nil { + return err + } + + w.WriteHeader(http.StatusNoContent) + + return nil +} + +func (s *containerRouter) postContainersUnpause(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + if err := s.backend.ContainerUnpause(vars["name"]); err != nil { + return err + } + + w.WriteHeader(http.StatusNoContent) + + return nil +} + +func (s *containerRouter) postContainersWait(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + status, err := s.backend.ContainerWait(vars["name"], -1*time.Second) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, &types.ContainerWaitResponse{ + StatusCode: status, + }) +} + +func (s *containerRouter) getContainersChanges(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + changes, err := s.backend.ContainerChanges(vars["name"]) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, changes) +} + +func (s *containerRouter) getContainersTop(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + procList, err := s.backend.ContainerTop(vars["name"], r.Form.Get("ps_args")) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, procList) +} + +func (s *containerRouter) postContainerRename(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + name := vars["name"] + newName := r.Form.Get("name") + if err := s.backend.ContainerRename(name, newName); err != nil { + return err + } + w.WriteHeader(http.StatusNoContent) + return nil +} + +func (s *containerRouter) postContainerUpdate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + if err := httputils.CheckForJSON(r); err != nil { + return err + } + + var updateConfig container.UpdateConfig + + decoder := json.NewDecoder(r.Body) + if err := decoder.Decode(&updateConfig); err != nil { + return err + } + + hostConfig := &container.HostConfig{ + Resources: updateConfig.Resources, + RestartPolicy: updateConfig.RestartPolicy, + } + + name := vars["name"] + warnings, err := s.backend.ContainerUpdate(name, hostConfig) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, &types.ContainerUpdateResponse{ + 
Warnings: warnings, + }) +} + +func (s *containerRouter) postContainersCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + if err := httputils.CheckForJSON(r); err != nil { + return err + } + + name := r.Form.Get("name") + + config, hostConfig, networkingConfig, err := runconfig.DecodeContainerConfig(r.Body) + if err != nil { + return err + } + version := httputils.VersionFromContext(ctx) + adjustCPUShares := version.LessThan("1.19") + + ccr, err := s.backend.ContainerCreate(types.ContainerCreateConfig{ + Name: name, + Config: config, + HostConfig: hostConfig, + NetworkingConfig: networkingConfig, + AdjustCPUShares: adjustCPUShares, + }) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusCreated, ccr) +} + +func (s *containerRouter) deleteContainers(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + name := vars["name"] + config := &types.ContainerRmConfig{ + ForceRemove: httputils.BoolValue(r, "force"), + RemoveVolume: httputils.BoolValue(r, "v"), + RemoveLink: httputils.BoolValue(r, "link"), + } + + if err := s.backend.ContainerRm(name, config); err != nil { + // Force a 404 for the empty string + if strings.Contains(strings.ToLower(err.Error()), "prefix can't be empty") { + return fmt.Errorf("no such container: \"\"") + } + return err + } + + w.WriteHeader(http.StatusNoContent) + + return nil +} + +func (s *containerRouter) postContainersResize(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + height, err := strconv.Atoi(r.Form.Get("h")) + if err != nil { + return err + } + width, err := strconv.Atoi(r.Form.Get("w")) + if err != nil { + return err + } + + return s.backend.ContainerResize(vars["name"], height, width) +} + +func (s *containerRouter) postContainersAttach(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + err := httputils.ParseForm(r) + if err != nil { + return err + } + containerName := vars["name"] + + _, upgrade := r.Header["Upgrade"] + + keys := []byte{} + detachKeys := r.FormValue("detachKeys") + if detachKeys != "" { + keys, err = term.ToBytes(detachKeys) + if err != nil { + logrus.Warnf("Invalid escape keys provided (%s) using default : ctrl-p ctrl-q", detachKeys) + } + } + + hijacker, ok := w.(http.Hijacker) + if !ok { + return fmt.Errorf("error attaching to container %s, hijack connection missing", containerName) + } + + setupStreams := func() (io.ReadCloser, io.Writer, io.Writer, error) { + conn, _, err := hijacker.Hijack() + if err != nil { + return nil, nil, nil, err + } + + // set raw mode + conn.Write([]byte{}) + + if upgrade { + fmt.Fprintf(conn, "HTTP/1.1 101 UPGRADED\r\nContent-Type: application/vnd.docker.raw-stream\r\nConnection: Upgrade\r\nUpgrade: tcp\r\n\r\n") + } else { + fmt.Fprintf(conn, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n") + } + + closer := func() error { + httputils.CloseStreams(conn) + return nil + } + return ioutils.NewReadCloserWrapper(conn, closer), conn, conn, nil + } + + attachConfig := &backend.ContainerAttachConfig{ + GetStreams: setupStreams, + UseStdin: httputils.BoolValue(r, "stdin"), + UseStdout: httputils.BoolValue(r, "stdout"), + UseStderr: httputils.BoolValue(r, "stderr"), + Logs: 
httputils.BoolValue(r, "logs"),
+		Stream:     httputils.BoolValue(r, "stream"),
+		DetachKeys: keys,
+		MuxStreams: true,
+	}
+
+	return s.backend.ContainerAttach(containerName, attachConfig)
+}
+
+func (s *containerRouter) wsContainersAttach(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+	if err := httputils.ParseForm(r); err != nil {
+		return err
+	}
+	containerName := vars["name"]
+
+	var keys []byte
+	var err error
+	detachKeys := r.FormValue("detachKeys")
+	if detachKeys != "" {
+		keys, err = term.ToBytes(detachKeys)
+		if err != nil {
+			logrus.Warnf("Invalid escape keys provided (%s) using default : ctrl-p ctrl-q", detachKeys)
+		}
+	}
+
+	done := make(chan struct{})
+	started := make(chan struct{})
+
+	setupStreams := func() (io.ReadCloser, io.Writer, io.Writer, error) {
+		wsChan := make(chan *websocket.Conn)
+		h := func(conn *websocket.Conn) {
+			wsChan <- conn
+			<-done
+		}
+
+		srv := websocket.Server{Handler: h, Handshake: nil}
+		go func() {
+			close(started)
+			srv.ServeHTTP(w, r)
+		}()
+
+		conn := <-wsChan
+		return conn, conn, conn, nil
+	}
+
+	attachConfig := &backend.ContainerAttachConfig{
+		GetStreams: setupStreams,
+		Logs:       httputils.BoolValue(r, "logs"),
+		Stream:     httputils.BoolValue(r, "stream"),
+		DetachKeys: keys,
+		UseStdin:   true,
+		UseStdout:  true,
+		UseStderr:  true,
+		MuxStreams: false, // TODO: this should be true since it's a single stream for both stdout and stderr
+	}
+
+	err = s.backend.ContainerAttach(containerName, attachConfig)
+	close(done)
+	select {
+	case <-started:
+		logrus.Errorf("Error attaching websocket: %s", err)
+		return nil
+	default:
+	}
+	return err
+}
diff --git a/vendor/github.com/docker/docker/api/server/router/container/copy.go b/vendor/github.com/docker/docker/api/server/router/container/copy.go
new file mode 100644
index 00000000..69584b31
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/server/router/container/copy.go
@@ -0,0 +1,112 @@
+package container
+
+import (
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"io"
+	"net/http"
+	"os"
+	"strings"
+
+	"github.com/docker/docker/api/server/httputils"
+	"github.com/docker/engine-api/types"
+	"golang.org/x/net/context"
+)
+
+// postContainersCopy is deprecated in favor of getContainersArchive.
+func (s *containerRouter) postContainersCopy(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+	if err := httputils.CheckForJSON(r); err != nil {
+		return err
+	}
+
+	cfg := types.CopyConfig{}
+	if err := json.NewDecoder(r.Body).Decode(&cfg); err != nil {
+		return err
+	}
+
+	if cfg.Resource == "" {
+		return fmt.Errorf("Path cannot be empty")
+	}
+
+	data, err := s.backend.ContainerCopy(vars["name"], cfg.Resource)
+	if err != nil {
+		if strings.Contains(strings.ToLower(err.Error()), "no such container") {
+			w.WriteHeader(http.StatusNotFound)
+			return nil
+		}
+		if os.IsNotExist(err) {
+			return fmt.Errorf("Could not find the file %s in container %s", cfg.Resource, vars["name"])
+		}
+		return err
+	}
+	defer data.Close()
+
+	w.Header().Set("Content-Type", "application/x-tar")
+	if _, err := io.Copy(w, data); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// Encode the stat to JSON, base64 encode, and place in a header.
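+// A client reverses this by base64-decoding the X-Docker-Container-Path-Stat
+// header value and unmarshalling the JSON, roughly:
+//
+//	raw, _ := base64.StdEncoding.DecodeString(h.Get("X-Docker-Container-Path-Stat"))
+//	var stat types.ContainerPathStat
+//	_ = json.Unmarshal(raw, &stat)
+//
+// (sketch only; h is an http.Header and error handling is elided).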
+func setContainerPathStatHeader(stat *types.ContainerPathStat, header http.Header) error { + statJSON, err := json.Marshal(stat) + if err != nil { + return err + } + + header.Set( + "X-Docker-Container-Path-Stat", + base64.StdEncoding.EncodeToString(statJSON), + ) + + return nil +} + +func (s *containerRouter) headContainersArchive(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + v, err := httputils.ArchiveFormValues(r, vars) + if err != nil { + return err + } + + stat, err := s.backend.ContainerStatPath(v.Name, v.Path) + if err != nil { + return err + } + + return setContainerPathStatHeader(stat, w.Header()) +} + +func (s *containerRouter) getContainersArchive(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + v, err := httputils.ArchiveFormValues(r, vars) + if err != nil { + return err + } + + tarArchive, stat, err := s.backend.ContainerArchivePath(v.Name, v.Path) + if err != nil { + return err + } + defer tarArchive.Close() + + if err := setContainerPathStatHeader(stat, w.Header()); err != nil { + return err + } + + w.Header().Set("Content-Type", "application/x-tar") + _, err = io.Copy(w, tarArchive) + + return err +} + +func (s *containerRouter) putContainersArchive(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + v, err := httputils.ArchiveFormValues(r, vars) + if err != nil { + return err + } + + noOverwriteDirNonDir := httputils.BoolValue(r, "noOverwriteDirNonDir") + return s.backend.ContainerExtractToDir(v.Name, v.Path, noOverwriteDirNonDir, r.Body) +} diff --git a/vendor/github.com/docker/docker/api/server/router/container/exec.go b/vendor/github.com/docker/docker/api/server/router/container/exec.go new file mode 100644 index 00000000..1a3ddc4c --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/container/exec.go @@ -0,0 +1,134 @@ +package container + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + "strconv" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/pkg/stdcopy" + "github.com/docker/engine-api/types" + "golang.org/x/net/context" +) + +func (s *containerRouter) getExecByID(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + eConfig, err := s.backend.ContainerExecInspect(vars["id"]) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, eConfig) +} + +func (s *containerRouter) postContainerExecCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + if err := httputils.CheckForJSON(r); err != nil { + return err + } + name := vars["name"] + + execConfig := &types.ExecConfig{} + if err := json.NewDecoder(r.Body).Decode(execConfig); err != nil { + return err + } + execConfig.Container = name + + if len(execConfig.Cmd) == 0 { + return fmt.Errorf("No exec command specified") + } + + // Register an instance of Exec in container. + id, err := s.backend.ContainerExecCreate(execConfig) + if err != nil { + logrus.Errorf("Error setting up exec command in container %s: %v", name, err) + return err + } + + return httputils.WriteJSON(w, http.StatusCreated, &types.ContainerExecCreateResponse{ + ID: id, + }) +} + +// TODO(vishh): Refactor the code to avoid having to specify stream config as part of both create and start. 
+func (s *containerRouter) postContainerExecStart(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + version := httputils.VersionFromContext(ctx) + if version.GreaterThan("1.21") { + if err := httputils.CheckForJSON(r); err != nil { + return err + } + } + + var ( + execName = vars["name"] + stdin, inStream io.ReadCloser + stdout, stderr, outStream io.Writer + ) + + execStartCheck := &types.ExecStartCheck{} + if err := json.NewDecoder(r.Body).Decode(execStartCheck); err != nil { + return err + } + + if exists, err := s.backend.ExecExists(execName); !exists { + return err + } + + if !execStartCheck.Detach { + var err error + // Setting up the streaming http interface. + inStream, outStream, err = httputils.HijackConnection(w) + if err != nil { + return err + } + defer httputils.CloseStreams(inStream, outStream) + + if _, ok := r.Header["Upgrade"]; ok { + fmt.Fprintf(outStream, "HTTP/1.1 101 UPGRADED\r\nContent-Type: application/vnd.docker.raw-stream\r\nConnection: Upgrade\r\nUpgrade: tcp\r\n\r\n") + } else { + fmt.Fprintf(outStream, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n") + } + + stdin = inStream + stdout = outStream + if !execStartCheck.Tty { + stderr = stdcopy.NewStdWriter(outStream, stdcopy.Stderr) + stdout = stdcopy.NewStdWriter(outStream, stdcopy.Stdout) + } + } + + // Now run the user process in container. + if err := s.backend.ContainerExecStart(execName, stdin, stdout, stderr); err != nil { + if execStartCheck.Detach { + return err + } + stdout.Write([]byte(err.Error())) + logrus.Errorf("Error running exec in container: %v\n", err) + return err + } + return nil +} + +func (s *containerRouter) postContainerExecResize(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + height, err := strconv.Atoi(r.Form.Get("h")) + if err != nil { + return err + } + width, err := strconv.Atoi(r.Form.Get("w")) + if err != nil { + return err + } + + return s.backend.ContainerExecResize(vars["name"], height, width) +} diff --git a/vendor/github.com/docker/docker/api/server/router/container/inspect.go b/vendor/github.com/docker/docker/api/server/router/container/inspect.go new file mode 100644 index 00000000..dbbced7e --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/container/inspect.go @@ -0,0 +1,21 @@ +package container + +import ( + "net/http" + + "github.com/docker/docker/api/server/httputils" + "golang.org/x/net/context" +) + +// getContainersByName inspects container's configuration and serializes it as json. 
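+// For example (API version and container ID illustrative):
+//
+//	GET /v1.23/containers/4fa6e0f0c678/json?size=1
+//
+// returns the inspect document, including size fields when size=1.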
+func (s *containerRouter) getContainersByName(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + displaySize := httputils.BoolValue(r, "size") + + version := httputils.VersionFromContext(ctx) + json, err := s.backend.ContainerInspect(vars["name"], displaySize, version) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, json) +} diff --git a/vendor/github.com/docker/docker/api/server/router/image/backend.go b/vendor/github.com/docker/docker/api/server/router/image/backend.go new file mode 100644 index 00000000..dfb02a4d --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/image/backend.go @@ -0,0 +1,44 @@ +package image + +import ( + "io" + + "github.com/docker/docker/reference" + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/container" + "github.com/docker/engine-api/types/registry" + "golang.org/x/net/context" +) + +// Backend is all the methods that need to be implemented +// to provide image specific functionality. +type Backend interface { + containerBackend + imageBackend + importExportBackend + registryBackend +} + +type containerBackend interface { + Commit(name string, config *types.ContainerCommitConfig) (imageID string, err error) +} + +type imageBackend interface { + ImageDelete(imageRef string, force, prune bool) ([]types.ImageDelete, error) + ImageHistory(imageName string) ([]*types.ImageHistory, error) + Images(filterArgs string, filter string, all bool) ([]*types.Image, error) + LookupImage(name string) (*types.ImageInspect, error) + TagImage(newTag reference.Named, imageName string) error +} + +type importExportBackend interface { + LoadImage(inTar io.ReadCloser, outStream io.Writer, quiet bool) error + ImportImage(src string, newRef reference.Named, msg string, inConfig io.ReadCloser, outStream io.Writer, config *container.Config) error + ExportImage(names []string, outStream io.Writer) error +} + +type registryBackend interface { + PullImage(ctx context.Context, ref reference.Named, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error + PushImage(ctx context.Context, ref reference.Named, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error + SearchRegistryForImages(ctx context.Context, term string, authConfig *types.AuthConfig, metaHeaders map[string][]string) (*registry.SearchResults, error) +} diff --git a/vendor/github.com/docker/docker/api/server/router/image/image.go b/vendor/github.com/docker/docker/api/server/router/image/image.go new file mode 100644 index 00000000..d6a1297a --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/image/image.go @@ -0,0 +1,44 @@ +package image + +import "github.com/docker/docker/api/server/router" + +// imageRouter is a router to talk with the image controller +type imageRouter struct { + backend Backend + routes []router.Route +} + +// NewRouter initializes a new image router +func NewRouter(backend Backend) router.Router { + r := &imageRouter{ + backend: backend, + } + r.initRoutes() + return r +} + +// Routes returns the available routes to the image controller +func (r *imageRouter) Routes() []router.Route { + return r.routes +} + +// initRoutes initializes the routes in the image router +func (r *imageRouter) initRoutes() { + r.routes = []router.Route{ + // GET + router.NewGetRoute("/images/json", r.getImagesJSON), + router.NewGetRoute("/images/search", r.getImagesSearch), + router.NewGetRoute("/images/get", 
r.getImagesGet), + router.NewGetRoute("/images/{name:.*}/get", r.getImagesGet), + router.NewGetRoute("/images/{name:.*}/history", r.getImagesHistory), + router.NewGetRoute("/images/{name:.*}/json", r.getImagesByName), + // POST + router.NewPostRoute("/commit", r.postCommit), + router.NewPostRoute("/images/create", r.postImagesCreate), + router.NewPostRoute("/images/load", r.postImagesLoad), + router.NewPostRoute("/images/{name:.*}/push", r.postImagesPush), + router.NewPostRoute("/images/{name:.*}/tag", r.postImagesTag), + // DELETE + router.NewDeleteRoute("/images/{name:.*}", r.deleteImages), + } +} diff --git a/vendor/github.com/docker/docker/api/server/router/image/image_routes.go b/vendor/github.com/docker/docker/api/server/router/image/image_routes.go new file mode 100644 index 00000000..58abd4b4 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/image/image_routes.go @@ -0,0 +1,383 @@ +package image + +import ( + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "strings" + + "github.com/docker/distribution/digest" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/builder/dockerfile" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/streamformatter" + "github.com/docker/docker/reference" + "github.com/docker/docker/runconfig" + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/container" + "golang.org/x/net/context" +) + +func (s *imageRouter) postCommit(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + if err := httputils.CheckForJSON(r); err != nil { + return err + } + + cname := r.Form.Get("container") + + pause := httputils.BoolValue(r, "pause") + version := httputils.VersionFromContext(ctx) + if r.FormValue("pause") == "" && version.GreaterThanOrEqualTo("1.13") { + pause = true + } + + c, _, _, err := runconfig.DecodeContainerConfig(r.Body) + if err != nil && err != io.EOF { //Do not fail if body is empty. + return err + } + if c == nil { + c = &container.Config{} + } + + newConfig, err := dockerfile.BuildFromConfig(c, r.Form["changes"]) + if err != nil { + return err + } + + commitCfg := &types.ContainerCommitConfig{ + Pause: pause, + Repo: r.Form.Get("repo"), + Tag: r.Form.Get("tag"), + Author: r.Form.Get("author"), + Comment: r.Form.Get("comment"), + Config: newConfig, + MergeConfigs: true, + } + + imgID, err := s.backend.Commit(cname, commitCfg) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusCreated, &types.ContainerCommitResponse{ + ID: string(imgID), + }) +} + +// Creates an image from Pull or from Import +func (s *imageRouter) postImagesCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + var ( + image = r.Form.Get("fromImage") + repo = r.Form.Get("repo") + tag = r.Form.Get("tag") + message = r.Form.Get("message") + err error + output = ioutils.NewWriteFlusher(w) + ) + defer output.Close() + + w.Header().Set("Content-Type", "application/json") + + if image != "" { //pull + // Special case: "pull -a" may send an image name with a + // trailing :. This is ugly, but let's not break API + // compatibility. + image = strings.TrimSuffix(image, ":") + + var ref reference.Named + ref, err = reference.ParseNamed(image) + if err == nil { + if tag != "" { + // The "tag" could actually be a digest. 
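+				// For instance, a value like "sha256:4bc453b5..." (truncated,
+				// illustrative) parses as a digest below, while a plain tag
+				// such as "latest" fails digest parsing and falls through to
+				// reference.WithTag.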
+ var dgst digest.Digest + dgst, err = digest.ParseDigest(tag) + if err == nil { + ref, err = reference.WithDigest(ref, dgst) + } else { + ref, err = reference.WithTag(ref, tag) + } + } + if err == nil { + metaHeaders := map[string][]string{} + for k, v := range r.Header { + if strings.HasPrefix(k, "X-Meta-") { + metaHeaders[k] = v + } + } + + authEncoded := r.Header.Get("X-Registry-Auth") + authConfig := &types.AuthConfig{} + if authEncoded != "" { + authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) + if err := json.NewDecoder(authJSON).Decode(authConfig); err != nil { + // for a pull it is not an error if no auth was given + // to increase compatibility with the existing api it is defaulting to be empty + authConfig = &types.AuthConfig{} + } + } + + err = s.backend.PullImage(ctx, ref, metaHeaders, authConfig, output) + } + } + } else { //import + var newRef reference.Named + if repo != "" { + var err error + newRef, err = reference.ParseNamed(repo) + if err != nil { + return err + } + + if _, isCanonical := newRef.(reference.Canonical); isCanonical { + return errors.New("cannot import digest reference") + } + + if tag != "" { + newRef, err = reference.WithTag(newRef, tag) + if err != nil { + return err + } + } + } + + src := r.Form.Get("fromSrc") + + // 'err' MUST NOT be defined within this block, we need any error + // generated from the download to be available to the output + // stream processing below + var newConfig *container.Config + newConfig, err = dockerfile.BuildFromConfig(&container.Config{}, r.Form["changes"]) + if err != nil { + return err + } + + err = s.backend.ImportImage(src, newRef, message, r.Body, output, newConfig) + } + if err != nil { + if !output.Flushed() { + return err + } + sf := streamformatter.NewJSONStreamFormatter() + output.Write(sf.FormatError(err)) + } + + return nil +} + +func (s *imageRouter) postImagesPush(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + metaHeaders := map[string][]string{} + for k, v := range r.Header { + if strings.HasPrefix(k, "X-Meta-") { + metaHeaders[k] = v + } + } + if err := httputils.ParseForm(r); err != nil { + return err + } + authConfig := &types.AuthConfig{} + + authEncoded := r.Header.Get("X-Registry-Auth") + if authEncoded != "" { + // the new format is to handle the authConfig as a header + authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) + if err := json.NewDecoder(authJSON).Decode(authConfig); err != nil { + // to increase compatibility to existing api it is defaulting to be empty + authConfig = &types.AuthConfig{} + } + } else { + // the old format is supported for compatibility if there was no authConfig header + if err := json.NewDecoder(r.Body).Decode(authConfig); err != nil { + return fmt.Errorf("Bad parameters and missing X-Registry-Auth: %v", err) + } + } + + ref, err := reference.ParseNamed(vars["name"]) + if err != nil { + return err + } + tag := r.Form.Get("tag") + if tag != "" { + // Push by digest is not supported, so only tags are supported. 
+ ref, err = reference.WithTag(ref, tag) + if err != nil { + return err + } + } + + output := ioutils.NewWriteFlusher(w) + defer output.Close() + + w.Header().Set("Content-Type", "application/json") + + if err := s.backend.PushImage(ctx, ref, metaHeaders, authConfig, output); err != nil { + if !output.Flushed() { + return err + } + sf := streamformatter.NewJSONStreamFormatter() + output.Write(sf.FormatError(err)) + } + return nil +} + +func (s *imageRouter) getImagesGet(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + w.Header().Set("Content-Type", "application/x-tar") + + output := ioutils.NewWriteFlusher(w) + defer output.Close() + var names []string + if name, ok := vars["name"]; ok { + names = []string{name} + } else { + names = r.Form["names"] + } + + if err := s.backend.ExportImage(names, output); err != nil { + if !output.Flushed() { + return err + } + sf := streamformatter.NewJSONStreamFormatter() + output.Write(sf.FormatError(err)) + } + return nil +} + +func (s *imageRouter) postImagesLoad(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + quiet := httputils.BoolValueOrDefault(r, "quiet", true) + + if !quiet { + w.Header().Set("Content-Type", "application/json") + + output := ioutils.NewWriteFlusher(w) + defer output.Close() + if err := s.backend.LoadImage(r.Body, output, quiet); err != nil { + output.Write(streamformatter.NewJSONStreamFormatter().FormatError(err)) + } + return nil + } + return s.backend.LoadImage(r.Body, w, quiet) +} + +func (s *imageRouter) deleteImages(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + name := vars["name"] + + if strings.TrimSpace(name) == "" { + return fmt.Errorf("image name cannot be blank") + } + + force := httputils.BoolValue(r, "force") + prune := !httputils.BoolValue(r, "noprune") + + list, err := s.backend.ImageDelete(name, force, prune) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, list) +} + +func (s *imageRouter) getImagesByName(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + imageInspect, err := s.backend.LookupImage(vars["name"]) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, imageInspect) +} + +func (s *imageRouter) getImagesJSON(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + // FIXME: The filter parameter could just be a match filter + images, err := s.backend.Images(r.Form.Get("filters"), r.Form.Get("filter"), httputils.BoolValue(r, "all")) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, images) +} + +func (s *imageRouter) getImagesHistory(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + name := vars["name"] + history, err := s.backend.ImageHistory(name) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, history) +} + +func (s *imageRouter) postImagesTag(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + repo := r.Form.Get("repo") + tag := 
r.Form.Get("tag") + newTag, err := reference.WithName(repo) + if err != nil { + return err + } + if tag != "" { + if newTag, err = reference.WithTag(newTag, tag); err != nil { + return err + } + } + if err := s.backend.TagImage(newTag, vars["name"]); err != nil { + return err + } + w.WriteHeader(http.StatusCreated) + return nil +} + +func (s *imageRouter) getImagesSearch(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + var ( + config *types.AuthConfig + authEncoded = r.Header.Get("X-Registry-Auth") + headers = map[string][]string{} + ) + + if authEncoded != "" { + authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) + if err := json.NewDecoder(authJSON).Decode(&config); err != nil { + // for a search it is not an error if no auth was given + // to increase compatibility with the existing api it is defaulting to be empty + config = &types.AuthConfig{} + } + } + for k, v := range r.Header { + if strings.HasPrefix(k, "X-Meta-") { + headers[k] = v + } + } + query, err := s.backend.SearchRegistryForImages(ctx, r.Form.Get("term"), config, headers) + if err != nil { + return err + } + return httputils.WriteJSON(w, http.StatusOK, query.Results) +} diff --git a/vendor/github.com/docker/docker/api/server/router/local.go b/vendor/github.com/docker/docker/api/server/router/local.go new file mode 100644 index 00000000..99db4242 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/local.go @@ -0,0 +1,61 @@ +package router + +import "github.com/docker/docker/api/server/httputils" + +// localRoute defines an individual API route to connect +// with the docker daemon. It implements Route. +type localRoute struct { + method string + path string + handler httputils.APIFunc +} + +// Handler returns the APIFunc to let the server wrap it in middlewares. +func (l localRoute) Handler() httputils.APIFunc { + return l.handler +} + +// Method returns the http method that the route responds to. +func (l localRoute) Method() string { + return l.method +} + +// Path returns the subpath where the route responds to. +func (l localRoute) Path() string { + return l.path +} + +// NewRoute initializes a new local route for the router. +func NewRoute(method, path string, handler httputils.APIFunc) Route { + return localRoute{method, path, handler} +} + +// NewGetRoute initializes a new route with the http method GET. +func NewGetRoute(path string, handler httputils.APIFunc) Route { + return NewRoute("GET", path, handler) +} + +// NewPostRoute initializes a new route with the http method POST. +func NewPostRoute(path string, handler httputils.APIFunc) Route { + return NewRoute("POST", path, handler) +} + +// NewPutRoute initializes a new route with the http method PUT. +func NewPutRoute(path string, handler httputils.APIFunc) Route { + return NewRoute("PUT", path, handler) +} + +// NewDeleteRoute initializes a new route with the http method DELETE. +func NewDeleteRoute(path string, handler httputils.APIFunc) Route { + return NewRoute("DELETE", path, handler) +} + +// NewOptionsRoute initializes a new route with the http method OPTIONS. +func NewOptionsRoute(path string, handler httputils.APIFunc) Route { + return NewRoute("OPTIONS", path, handler) +} + +// NewHeadRoute initializes a new route with the http method HEAD. 
+func NewHeadRoute(path string, handler httputils.APIFunc) Route { + return NewRoute("HEAD", path, handler) +} diff --git a/vendor/github.com/docker/docker/api/server/router/router.go b/vendor/github.com/docker/docker/api/server/router/router.go new file mode 100644 index 00000000..2de25c27 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/router.go @@ -0,0 +1,19 @@ +package router + +import "github.com/docker/docker/api/server/httputils" + +// Router defines an interface to specify a group of routes to add to the docker server. +type Router interface { + // Routes returns the list of routes to add to the docker server. + Routes() []Route +} + +// Route defines an individual API route in the docker server. +type Route interface { + // Handler returns the raw function to create the http handler. + Handler() httputils.APIFunc + // Method returns the http method that the route responds to. + Method() string + // Path returns the subpath where the route responds to. + Path() string +} diff --git a/vendor/github.com/docker/docker/api/server/router/system/backend.go b/vendor/github.com/docker/docker/api/server/router/system/backend.go new file mode 100644 index 00000000..e6284cd4 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/system/backend.go @@ -0,0 +1,18 @@ +package system + +import ( + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/events" + "github.com/docker/engine-api/types/filters" + "golang.org/x/net/context" +) + +// Backend is the methods that need to be implemented to provide +// system specific functionality. +type Backend interface { + SystemInfo() (*types.Info, error) + SystemVersion() types.Version + SubscribeToEvents(since, sinceNano int64, ef filters.Args) ([]events.Message, chan interface{}) + UnsubscribeFromEvents(chan interface{}) + AuthenticateToRegistry(ctx context.Context, authConfig *types.AuthConfig) (string, string, error) +} diff --git a/vendor/github.com/docker/docker/api/server/router/system/system.go b/vendor/github.com/docker/docker/api/server/router/system/system.go new file mode 100644 index 00000000..76da5c52 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/system/system.go @@ -0,0 +1,33 @@ +package system + +import "github.com/docker/docker/api/server/router" + +// systemRouter provides information about the Docker system overall. +// It gathers information about host, daemon and container events. 
+type systemRouter struct { + backend Backend + routes []router.Route +} + +// NewRouter initializes a new system router +func NewRouter(b Backend) router.Router { + r := &systemRouter{ + backend: b, + } + + r.routes = []router.Route{ + router.NewOptionsRoute("/{anyroute:.*}", optionsHandler), + router.NewGetRoute("/_ping", pingHandler), + router.NewGetRoute("/events", r.getEvents), + router.NewGetRoute("/info", r.getInfo), + router.NewGetRoute("/version", r.getVersion), + router.NewPostRoute("/auth", r.postAuth), + } + + return r +} + +// Routes returns all the API routes dedicated to the docker system +func (s *systemRouter) Routes() []router.Route { + return s.routes +} diff --git a/vendor/github.com/docker/docker/api/server/router/system/system_routes.go b/vendor/github.com/docker/docker/api/server/router/system/system_routes.go new file mode 100644 index 00000000..defaa0d6 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/system/system_routes.go @@ -0,0 +1,125 @@ +package system + +import ( + "encoding/json" + "net/http" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/events" + "github.com/docker/engine-api/types/filters" + timetypes "github.com/docker/engine-api/types/time" + "golang.org/x/net/context" +) + +func optionsHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + w.WriteHeader(http.StatusOK) + return nil +} + +func pingHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + _, err := w.Write([]byte{'O', 'K'}) + return err +} + +func (s *systemRouter) getInfo(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + info, err := s.backend.SystemInfo() + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, info) +} + +func (s *systemRouter) getVersion(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + info := s.backend.SystemVersion() + info.APIVersion = api.DefaultVersion.String() + + return httputils.WriteJSON(w, http.StatusOK, info) +} + +func (s *systemRouter) getEvents(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + since, sinceNano, err := timetypes.ParseTimestamps(r.Form.Get("since"), -1) + if err != nil { + return err + } + until, untilNano, err := timetypes.ParseTimestamps(r.Form.Get("until"), -1) + if err != nil { + return err + } + + var timeout <-chan time.Time + if until > 0 || untilNano > 0 { + dur := time.Unix(until, untilNano).Sub(time.Now()) + timeout = time.NewTimer(dur).C + } + + ef, err := filters.FromParam(r.Form.Get("filters")) + if err != nil { + return err + } + + w.Header().Set("Content-Type", "application/json") + output := ioutils.NewWriteFlusher(w) + defer output.Close() + output.Flush() + + enc := json.NewEncoder(output) + + buffered, l := s.backend.SubscribeToEvents(since, sinceNano, ef) + defer s.backend.UnsubscribeFromEvents(l) + + for _, ev := range buffered { + if err := enc.Encode(ev); err != nil { + return err + } + } + + var closeNotify <-chan bool + if closeNotifier, ok := w.(http.CloseNotifier); ok { + closeNotify = closeNotifier.CloseNotify() + } + + for { + select { + case ev := <-l: + jev, ok := 
ev.(events.Message) + if !ok { + logrus.Warnf("unexpected event message: %q", ev) + continue + } + if err := enc.Encode(jev); err != nil { + return err + } + case <-timeout: + return nil + case <-closeNotify: + logrus.Debug("Client disconnected, stop sending events") + return nil + } + } +} + +func (s *systemRouter) postAuth(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var config *types.AuthConfig + err := json.NewDecoder(r.Body).Decode(&config) + r.Body.Close() + if err != nil { + return err + } + status, token, err := s.backend.AuthenticateToRegistry(ctx, config) + if err != nil { + return err + } + return httputils.WriteJSON(w, http.StatusOK, &types.AuthResponse{ + Status: status, + IdentityToken: token, + }) +} diff --git a/vendor/github.com/docker/docker/api/server/router/volume/backend.go b/vendor/github.com/docker/docker/api/server/router/volume/backend.go new file mode 100644 index 00000000..fbf5ed27 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/volume/backend.go @@ -0,0 +1,15 @@ +package volume + +import ( + // TODO return types need to be refactored into pkg + "github.com/docker/engine-api/types" +) + +// Backend is the methods that need to be implemented to provide +// volume specific functionality +type Backend interface { + Volumes(filter string) ([]*types.Volume, []string, error) + VolumeInspect(name string) (*types.Volume, error) + VolumeCreate(name, driverName string, opts, labels map[string]string) (*types.Volume, error) + VolumeRm(name string) error +} diff --git a/vendor/github.com/docker/docker/api/server/router/volume/volume.go b/vendor/github.com/docker/docker/api/server/router/volume/volume.go new file mode 100644 index 00000000..2683dcec --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/volume/volume.go @@ -0,0 +1,35 @@ +package volume + +import "github.com/docker/docker/api/server/router" + +// volumeRouter is a router to talk with the volumes controller +type volumeRouter struct { + backend Backend + routes []router.Route +} + +// NewRouter initializes a new volume router +func NewRouter(b Backend) router.Router { + r := &volumeRouter{ + backend: b, + } + r.initRoutes() + return r +} + +// Routes returns the available routes to the volumes controller +func (r *volumeRouter) Routes() []router.Route { + return r.routes +} + +func (r *volumeRouter) initRoutes() { + r.routes = []router.Route{ + // GET + router.NewGetRoute("/volumes", r.getVolumesList), + router.NewGetRoute("/volumes/{name:.*}", r.getVolumeByName), + // POST + router.NewPostRoute("/volumes/create", r.postVolumesCreate), + // DELETE + router.NewDeleteRoute("/volumes/{name:.*}", r.deleteVolumes), + } +} diff --git a/vendor/github.com/docker/docker/api/server/router/volume/volume_routes.go b/vendor/github.com/docker/docker/api/server/router/volume/volume_routes.go new file mode 100644 index 00000000..5aa0d4a7 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/volume/volume_routes.go @@ -0,0 +1,66 @@ +package volume + +import ( + "encoding/json" + "net/http" + + "github.com/docker/docker/api/server/httputils" + "github.com/docker/engine-api/types" + "golang.org/x/net/context" +) + +func (v *volumeRouter) getVolumesList(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + volumes, warnings, err := v.backend.Volumes(r.Form.Get("filters")) + if err != nil { + return err + } + return 
httputils.WriteJSON(w, http.StatusOK, &types.VolumesListResponse{Volumes: volumes, Warnings: warnings})
+}
+
+func (v *volumeRouter) getVolumeByName(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+	if err := httputils.ParseForm(r); err != nil {
+		return err
+	}
+
+	volume, err := v.backend.VolumeInspect(vars["name"])
+	if err != nil {
+		return err
+	}
+	return httputils.WriteJSON(w, http.StatusOK, volume)
+}
+
+func (v *volumeRouter) postVolumesCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+	if err := httputils.ParseForm(r); err != nil {
+		return err
+	}
+
+	if err := httputils.CheckForJSON(r); err != nil {
+		return err
+	}
+
+	var req types.VolumeCreateRequest
+	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
+		return err
+	}
+
+	volume, err := v.backend.VolumeCreate(req.Name, req.Driver, req.DriverOpts, req.Labels)
+	if err != nil {
+		return err
+	}
+	return httputils.WriteJSON(w, http.StatusCreated, volume)
+}
+
+func (v *volumeRouter) deleteVolumes(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+	if err := httputils.ParseForm(r); err != nil {
+		return err
+	}
+	if err := v.backend.VolumeRm(vars["name"]); err != nil {
+		return err
+	}
+	w.WriteHeader(http.StatusNoContent)
+	return nil
+}
diff --git a/vendor/github.com/docker/docker/api/server/router_swapper.go b/vendor/github.com/docker/docker/api/server/router_swapper.go
new file mode 100644
index 00000000..1ecc7a7f
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/server/router_swapper.go
@@ -0,0 +1,30 @@
+package server
+
+import (
+	"net/http"
+	"sync"
+
+	"github.com/gorilla/mux"
+)
+
+// routerSwapper is an http.Handler that allows you to swap
+// mux routers.
+type routerSwapper struct {
+	mu     sync.Mutex
+	router *mux.Router
+}
+
+// Swap replaces the old router with the new one.
+func (rs *routerSwapper) Swap(newRouter *mux.Router) {
+	rs.mu.Lock()
+	rs.router = newRouter
+	rs.mu.Unlock()
+}
+
+// ServeHTTP makes the routerSwapper implement the http.Handler interface.
+func (rs *routerSwapper) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	rs.mu.Lock()
+	router := rs.router
+	rs.mu.Unlock()
+	router.ServeHTTP(w, r)
+}
diff --git a/vendor/github.com/docker/docker/api/server/server.go b/vendor/github.com/docker/docker/api/server/server.go
new file mode 100644
index 00000000..1379b737
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/server/server.go
@@ -0,0 +1,195 @@
+package server
+
+import (
+	"crypto/tls"
+	"net"
+	"net/http"
+	"strings"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/api/server/httputils"
+	"github.com/docker/docker/api/server/router"
+	"github.com/docker/docker/pkg/authorization"
+	"github.com/gorilla/mux"
+	"golang.org/x/net/context"
+)
+
+// versionMatcher defines a variable matcher to be parsed by the router
+// when a request is about to be served.
+const versionMatcher = "/v{version:[0-9.]+}"
+
+// Config provides the configuration for the API server
+type Config struct {
+	Logging                  bool
+	EnableCors               bool
+	CorsHeaders              string
+	AuthorizationPluginNames []string
+	Version                  string
+	SocketGroup              string
+	TLSConfig                *tls.Config
+}
+
+// Server contains instance details for the server
+type Server struct {
+	cfg           *Config
+	servers       []*HTTPServer
+	routers       []router.Router
+	authZPlugins  []authorization.Plugin
+	routerSwapper *routerSwapper
+}
+
+// New returns a new instance of the server based on the specified configuration.
+// It allocates resources which will be needed for ServeAPI(ports, unix-sockets).
+func New(cfg *Config) *Server {
+	return &Server{
+		cfg: cfg,
+	}
+}
+
+// Accept registers the given listeners on which the server will accept connections.
+func (s *Server) Accept(addr string, listeners ...net.Listener) {
+	for _, listener := range listeners {
+		httpServer := &HTTPServer{
+			srv: &http.Server{
+				Addr: addr,
+			},
+			l: listener,
+		}
+		s.servers = append(s.servers, httpServer)
+	}
+}
+
+// Close closes servers and thus stops receiving requests
+func (s *Server) Close() {
+	for _, srv := range s.servers {
+		if err := srv.Close(); err != nil {
+			logrus.Error(err)
+		}
+	}
+}
+
+// serveAPI loops through all initialized servers and spawns a goroutine
+// running the Serve method for each. It also sets the routerSwapper as
+// the handler of each server.
+func (s *Server) serveAPI() error {
+	var chErrors = make(chan error, len(s.servers))
+	for _, srv := range s.servers {
+		srv.srv.Handler = s.routerSwapper
+		go func(srv *HTTPServer) {
+			var err error
+			logrus.Infof("API listen on %s", srv.l.Addr())
+			if err = srv.Serve(); err != nil && strings.Contains(err.Error(), "use of closed network connection") {
+				err = nil
+			}
+			chErrors <- err
+		}(srv)
+	}
+
+	for i := 0; i < len(s.servers); i++ {
+		err := <-chErrors
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// HTTPServer contains an instance of an http server and its listener.
+// srv *http.Server contains the configuration to create an http server and a mux router with all API endpoints.
+// l net.Listener is a TCP or Socket listener that dispatches incoming requests to the router.
+type HTTPServer struct {
+	srv *http.Server
+	l   net.Listener
+}
+
+// Serve starts listening for inbound requests.
+func (s *HTTPServer) Serve() error {
+	return s.srv.Serve(s.l)
+}
+
+// Close closes the HTTPServer from listening for the inbound requests.
+func (s *HTTPServer) Close() error {
+	return s.l.Close()
+}
+
+func (s *Server) makeHTTPHandler(handler httputils.APIFunc) http.HandlerFunc {
+	return func(w http.ResponseWriter, r *http.Request) {
+		// Define the context that we'll pass around to share info
+		// like the docker-request-id.
+		//
+		// The 'context' will be used for global data that should
+		// apply to all requests. Data that is specific to the
+		// immediate function being called should still be passed
+		// as 'args' on the function call.
+		ctx := context.Background()
+		handlerFunc := s.handleWithGlobalMiddlewares(handler)
+
+		vars := mux.Vars(r)
+		if vars == nil {
+			vars = make(map[string]string)
+		}
+
+		if err := handlerFunc(ctx, w, r, vars); err != nil {
+			logrus.Errorf("Handler for %s %s returned error: %v", r.Method, r.URL.Path, err)
+			httputils.WriteError(w, err)
+		}
+	}
+}
+
+// InitRouter initializes the list of routers for the server.
+// This method also enables the Go profiler if enableProfiler is true.
+func (s *Server) InitRouter(enableProfiler bool, routers ...router.Router) {
+	for _, r := range routers {
+		s.routers = append(s.routers, r)
+	}
+
+	m := s.createMux()
+	if enableProfiler {
+		profilerSetup(m)
+	}
+	s.routerSwapper = &routerSwapper{
+		router: m,
+	}
+}
+
+// createMux initializes the main router the server uses.
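+// Every route is registered twice, versioned and bare, so both of the
+// following (version number illustrative) reach the same handler:
+//
+//	GET /v1.23/containers/json
+//	GET /containers/json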
+func (s *Server) createMux() *mux.Router { + m := mux.NewRouter() + + logrus.Debugf("Registering routers") + for _, apiRouter := range s.routers { + for _, r := range apiRouter.Routes() { + f := s.makeHTTPHandler(r.Handler()) + + logrus.Debugf("Registering %s, %s", r.Method(), r.Path()) + m.Path(versionMatcher + r.Path()).Methods(r.Method()).Handler(f) + m.Path(r.Path()).Methods(r.Method()).Handler(f) + } + } + + return m +} + +// Wait blocks the server goroutine until it exits. +// It sends an error message if there is any error during +// the API execution. +func (s *Server) Wait(waitChan chan error) { + if err := s.serveAPI(); err != nil { + logrus.Errorf("ServeAPI error: %v", err) + waitChan <- err + return + } + waitChan <- nil +} + +// DisableProfiler reloads the server mux without adding the profiler routes. +func (s *Server) DisableProfiler() { + s.routerSwapper.Swap(s.createMux()) +} + +// EnableProfiler reloads the server mux adding the profiler routes. +func (s *Server) EnableProfiler() { + m := s.createMux() + profilerSetup(m) + s.routerSwapper.Swap(m) +} diff --git a/vendor/github.com/docker/docker/api/types/backend/backend.go b/vendor/github.com/docker/docker/api/types/backend/backend.go new file mode 100644 index 00000000..ffe9b709 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/backend/backend.go @@ -0,0 +1,69 @@ +// Package backend includes types to send information to server backends. +// TODO(calavera): This package is pending of extraction to engine-api +// when the server package is clean of daemon dependencies. +package backend + +import ( + "io" + + "github.com/docker/engine-api/types" +) + +// ContainerAttachConfig holds the streams to use when connecting to a container to view logs. +type ContainerAttachConfig struct { + GetStreams func() (io.ReadCloser, io.Writer, io.Writer, error) + UseStdin bool + UseStdout bool + UseStderr bool + Logs bool + Stream bool + DetachKeys []byte + + // Used to signify that streams are multiplexed and therefore need a StdWriter to encode stdout/sderr messages accordingly. + // TODO @cpuguy83: This shouldn't be needed. It was only added so that http and websocket endpoints can use the same function, and the websocket function was not using a stdwriter prior to this change... + // HOWEVER, the websocket endpoint is using a single stream and SHOULD be encoded with stdout/stderr as is done for HTTP since it is still just a single stream. + // Since such a change is an API change unrelated to the current changeset we'll keep it as is here and change separately. + MuxStreams bool +} + +// ContainerLogsConfig holds configs for logging operations. Exists +// for users of the backend to to pass it a logging configuration. +type ContainerLogsConfig struct { + types.ContainerLogsOptions + OutStream io.Writer + Stop <-chan bool +} + +// ContainerStatsConfig holds information for configuring the runtime +// behavior of a backend.ContainerStats() call. +type ContainerStatsConfig struct { + Stream bool + OutStream io.Writer + Stop <-chan bool + Version string +} + +// ExecInspect holds information about a running process started +// with docker exec. +type ExecInspect struct { + ID string + Running bool + ExitCode *int + ProcessConfig *ExecProcessConfig + OpenStdin bool + OpenStderr bool + OpenStdout bool + CanRemove bool + ContainerID string + DetachKeys []byte +} + +// ExecProcessConfig holds information about the exec process +// running on the host. 
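
// Note on createMux above: every route is registered twice, once behind
// versionMatcher and once bare, so a route like GET /volumes/{name} (the
// exact paths are registered by the individual routers, this one is only
// illustrative) resolves to the same handler either way:
//
//	GET /v1.23/volumes/{name}   // {version} is captured into the mux vars
//	GET /volumes/{name}         // unversioned form
//
// The "1.23" is made up for the sketch; versionMatcher accepts any [0-9.]+.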
+type ExecProcessConfig struct { + Tty bool `json:"tty"` + Entrypoint string `json:"entrypoint"` + Arguments []string `json:"arguments"` + Privileged *bool `json:"privileged,omitempty"` + User string `json:"user,omitempty"` +} diff --git a/vendor/github.com/docker/docker/builder/context.go b/vendor/github.com/docker/docker/builder/context.go index 708f9738..7786c56e 100644 --- a/vendor/github.com/docker/docker/builder/context.go +++ b/vendor/github.com/docker/docker/builder/context.go @@ -29,6 +29,16 @@ func ValidateContextDirectory(srcPath string, excludes []string) error { return err } return filepath.Walk(contextRoot, func(filePath string, f os.FileInfo, err error) error { + if err != nil { + if os.IsPermission(err) { + return fmt.Errorf("can't stat '%s'", filePath) + } + if os.IsNotExist(err) { + return nil + } + return err + } + // skip this directory/file if it's not in the path, it won't get added to the context if relFilePath, err := filepath.Rel(contextRoot, filePath); err != nil { return err @@ -41,16 +51,6 @@ func ValidateContextDirectory(srcPath string, excludes []string) error { return nil } - if err != nil { - if os.IsPermission(err) { - return fmt.Errorf("can't stat '%s'", filePath) - } - if os.IsNotExist(err) { - return nil - } - return err - } - // skip checking if symlinks point to non-existing files, such symlinks can be useful // also skip named pipes, because they hanging on open if f.Mode()&(os.ModeSymlink|os.ModeNamedPipe) != 0 { diff --git a/vendor/github.com/docker/docker/builder/dockerfile/bflag.go b/vendor/github.com/docker/docker/builder/dockerfile/bflag.go new file mode 100644 index 00000000..c2e6c7da --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/bflag.go @@ -0,0 +1,176 @@ +package dockerfile + +import ( + "fmt" + "strings" +) + +// FlagType is the type of the build flag +type FlagType int + +const ( + boolType FlagType = iota + stringType +) + +// BFlags contains all flags information for the builder +type BFlags struct { + Args []string // actual flags/args from cmd line + flags map[string]*Flag + used map[string]*Flag + Err error +} + +// Flag contains all information for a flag +type Flag struct { + bf *BFlags + name string + flagType FlagType + Value string +} + +// NewBFlags return the new BFlags struct +func NewBFlags() *BFlags { + return &BFlags{ + flags: make(map[string]*Flag), + used: make(map[string]*Flag), + } +} + +// AddBool adds a bool flag to BFlags +// Note, any error will be generated when Parse() is called (see Parse). +func (bf *BFlags) AddBool(name string, def bool) *Flag { + flag := bf.addFlag(name, boolType) + if flag == nil { + return nil + } + if def { + flag.Value = "true" + } else { + flag.Value = "false" + } + return flag +} + +// AddString adds a string flag to BFlags +// Note, any error will be generated when Parse() is called (see Parse). +func (bf *BFlags) AddString(name string, def string) *Flag { + flag := bf.addFlag(name, stringType) + if flag == nil { + return nil + } + flag.Value = def + return flag +} + +// addFlag is a generic func used by the other AddXXX() func +// to add a new flag to the BFlags struct. +// Note, any error will be generated when Parse() is called (see Parse). 
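
// The context.go hunk above is an ordering fix worth spelling out: when
// filepath.Walk fails to stat an entry it still invokes the callback, but
// with a nil os.FileInfo, so the callback must handle err before touching f.
// The corrected shape in general form (root and useInfo are illustrative):
//
//	filepath.Walk(root, func(path string, f os.FileInfo, err error) error {
//		if err != nil {
//			return err // f may be nil here; decide to skip or abort
//		}
//		return useInfo(f) // only now is f safe to use
//	})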
+func (bf *BFlags) addFlag(name string, flagType FlagType) *Flag { + if _, ok := bf.flags[name]; ok { + bf.Err = fmt.Errorf("Duplicate flag defined: %s", name) + return nil + } + + newFlag := &Flag{ + bf: bf, + name: name, + flagType: flagType, + } + bf.flags[name] = newFlag + + return newFlag +} + +// IsUsed checks if the flag is used +func (fl *Flag) IsUsed() bool { + if _, ok := fl.bf.used[fl.name]; ok { + return true + } + return false +} + +// IsTrue checks if a bool flag is true +func (fl *Flag) IsTrue() bool { + if fl.flagType != boolType { + // Should never get here + panic(fmt.Errorf("Trying to use IsTrue on a non-boolean: %s", fl.name)) + } + return fl.Value == "true" +} + +// Parse parses and checks if the BFlags is valid. +// Any error noticed during the AddXXX() funcs will be generated/returned +// here. We do this because an error during AddXXX() is more like a +// compile time error so it doesn't matter too much when we stop our +// processing as long as we do stop it, so this allows the code +// around AddXXX() to be just: +// defFlag := AddString("description", "") +// w/o needing to add an if-statement around each one. +func (bf *BFlags) Parse() error { + // If there was an error while defining the possible flags + // go ahead and bubble it back up here since we didn't do it + // earlier in the processing + if bf.Err != nil { + return fmt.Errorf("Error setting up flags: %s", bf.Err) + } + + for _, arg := range bf.Args { + if !strings.HasPrefix(arg, "--") { + return fmt.Errorf("Arg should start with -- : %s", arg) + } + + if arg == "--" { + return nil + } + + arg = arg[2:] + value := "" + + index := strings.Index(arg, "=") + if index >= 0 { + value = arg[index+1:] + arg = arg[:index] + } + + flag, ok := bf.flags[arg] + if !ok { + return fmt.Errorf("Unknown flag: %s", arg) + } + + if _, ok = bf.used[arg]; ok { + return fmt.Errorf("Duplicate flag specified: %s", arg) + } + + bf.used[arg] = flag + + switch flag.flagType { + case boolType: + // value == "" is only ok if no "=" was specified + if index >= 0 && value == "" { + return fmt.Errorf("Missing a value on flag: %s", arg) + } + + lower := strings.ToLower(value) + if lower == "" { + flag.Value = "true" + } else if lower == "true" || lower == "false" { + flag.Value = lower + } else { + return fmt.Errorf("Expecting boolean value for flag %s, not: %s", arg, value) + } + + case stringType: + if index < 0 { + return fmt.Errorf("Missing a value on flag: %s", arg) + } + flag.Value = value + + default: + panic(fmt.Errorf("No idea what kind of flag we have! 
Should never get here!")) + } + + } + + return nil +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/builder.go b/vendor/github.com/docker/docker/builder/dockerfile/builder.go new file mode 100644 index 00000000..daa70498 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/builder.go @@ -0,0 +1,326 @@ +package dockerfile + +import ( + "bytes" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "strings" + "sync" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/builder" + "github.com/docker/docker/builder/dockerfile/parser" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/reference" + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/container" + "golang.org/x/net/context" +) + +var validCommitCommands = map[string]bool{ + "cmd": true, + "entrypoint": true, + "env": true, + "expose": true, + "label": true, + "onbuild": true, + "user": true, + "volume": true, + "workdir": true, +} + +// BuiltinAllowedBuildArgs is list of built-in allowed build args +var BuiltinAllowedBuildArgs = map[string]bool{ + "HTTP_PROXY": true, + "http_proxy": true, + "HTTPS_PROXY": true, + "https_proxy": true, + "FTP_PROXY": true, + "ftp_proxy": true, + "NO_PROXY": true, + "no_proxy": true, +} + +// Builder is a Dockerfile builder +// It implements the builder.Backend interface. +type Builder struct { + options *types.ImageBuildOptions + + Stdout io.Writer + Stderr io.Writer + Output io.Writer + + docker builder.Backend + context builder.Context + clientCtx context.Context + + dockerfile *parser.Node + runConfig *container.Config // runconfig for cmd, run, entrypoint etc. + flags *BFlags + tmpContainers map[string]struct{} + image string // imageID + noBaseImage bool + maintainer string + cmdSet bool + disableCommit bool + cacheBusted bool + cancelled chan struct{} + cancelOnce sync.Once + allowedBuildArgs map[string]bool // list of build-time args that are allowed for expansion/substitution and passing to commands in 'run'. + + // TODO: remove once docker.Commit can receive a tag + id string +} + +// BuildManager implements builder.Backend and is shared across all Builder objects. +type BuildManager struct { + backend builder.Backend +} + +// NewBuildManager creates a BuildManager. +func NewBuildManager(b builder.Backend) (bm *BuildManager) { + return &BuildManager{backend: b} +} + +// NewBuilder creates a new Dockerfile builder from an optional dockerfile and a Config. +// If dockerfile is nil, the Dockerfile specified by Config.DockerfileName, +// will be read from the Context passed to Build(). +func NewBuilder(clientCtx context.Context, config *types.ImageBuildOptions, backend builder.Backend, context builder.Context, dockerfile io.ReadCloser) (b *Builder, err error) { + if config == nil { + config = new(types.ImageBuildOptions) + } + if config.BuildArgs == nil { + config.BuildArgs = make(map[string]string) + } + b = &Builder{ + clientCtx: clientCtx, + options: config, + Stdout: os.Stdout, + Stderr: os.Stderr, + docker: backend, + context: context, + runConfig: new(container.Config), + tmpContainers: map[string]struct{}{}, + cancelled: make(chan struct{}), + id: stringid.GenerateNonCryptoID(), + allowedBuildArgs: make(map[string]bool), + } + if dockerfile != nil { + b.dockerfile, err = parser.Parse(dockerfile) + if err != nil { + return nil, err + } + } + + return b, nil +} + +// sanitizeRepoAndTags parses the raw "t" parameter received from the client +// to a slice of repoAndTag. 
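
// Stepping back to the flag parser completed above: definition errors are
// deliberately deferred, so a dispatcher can chain AddXXX() calls and check a
// single error at Parse() time. An illustrative round trip (the flag names
// are made up for the sketch):
//
//	flags := NewBFlags()
//	force := flags.AddBool("force", false)
//	chown := flags.AddString("chown", "")
//	flags.Args = []string{"--force", "--chown=app:app"}
//	if err := flags.Parse(); err != nil {
//		return err // unknown flag, duplicate flag, bad bool value, ...
//	}
//	force.IsTrue() // true
//	chown.Value    // "app:app"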
+// It also validates each repoName and tag. +func sanitizeRepoAndTags(names []string) ([]reference.Named, error) { + var ( + repoAndTags []reference.Named + // This map is used for deduplicating the "-t" parameter. + uniqNames = make(map[string]struct{}) + ) + for _, repo := range names { + if repo == "" { + continue + } + + ref, err := reference.ParseNamed(repo) + if err != nil { + return nil, err + } + + ref = reference.WithDefaultTag(ref) + + if _, isCanonical := ref.(reference.Canonical); isCanonical { + return nil, errors.New("build tag cannot contain a digest") + } + + if _, isTagged := ref.(reference.NamedTagged); !isTagged { + ref, err = reference.WithTag(ref, reference.DefaultTag) + if err != nil { + return nil, err + } + } + + nameWithTag := ref.String() + + if _, exists := uniqNames[nameWithTag]; !exists { + uniqNames[nameWithTag] = struct{}{} + repoAndTags = append(repoAndTags, ref) + } + } + return repoAndTags, nil +} + +// Build creates a NewBuilder, which builds the image. +func (bm *BuildManager) Build(clientCtx context.Context, config *types.ImageBuildOptions, context builder.Context, stdout io.Writer, stderr io.Writer, out io.Writer, clientGone <-chan bool) (string, error) { + b, err := NewBuilder(clientCtx, config, bm.backend, context, nil) + if err != nil { + return "", err + } + img, err := b.build(config, context, stdout, stderr, out, clientGone) + return img, err + +} + +// build runs the Dockerfile builder from a context and a docker object that allows to make calls +// to Docker. +// +// This will (barring errors): +// +// * read the dockerfile from context +// * parse the dockerfile if not already parsed +// * walk the AST and execute it by dispatching to handlers. If Remove +// or ForceRemove is set, additional cleanup around containers happens after +// processing. +// * Tag image, if applicable. +// * Print a happy message and return the image ID. +// +func (b *Builder) build(config *types.ImageBuildOptions, context builder.Context, stdout io.Writer, stderr io.Writer, out io.Writer, clientGone <-chan bool) (string, error) { + b.options = config + b.context = context + b.Stdout = stdout + b.Stderr = stderr + b.Output = out + + // If Dockerfile was not parsed yet, extract it from the Context + if b.dockerfile == nil { + if err := b.readDockerfile(); err != nil { + return "", err + } + } + + finished := make(chan struct{}) + defer close(finished) + go func() { + select { + case <-finished: + case <-clientGone: + b.cancelOnce.Do(func() { + close(b.cancelled) + }) + } + + }() + + repoAndTags, err := sanitizeRepoAndTags(config.Tags) + if err != nil { + return "", err + } + + if len(b.options.Labels) > 0 { + line := "LABEL " + for k, v := range b.options.Labels { + line += fmt.Sprintf("%q=%q ", k, v) + } + _, node, err := parser.ParseLine(line) + if err != nil { + return "", err + } + b.dockerfile.Children = append(b.dockerfile.Children, node) + } + + var shortImgID string + for i, n := range b.dockerfile.Children { + select { + case <-b.cancelled: + logrus.Debug("Builder: build cancelled!") + fmt.Fprintf(b.Stdout, "Build cancelled") + return "", fmt.Errorf("Build cancelled") + default: + // Not cancelled yet, keep going... 
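
// A worked pass through sanitizeRepoAndTags above, for
// names = ["app", "app:latest"]: "app" gains the default tag and becomes
// app:latest, and the explicit "app:latest" is then dropped as a duplicate,
// leaving a single reference. A digest form such as app@sha256:<digest>
// would instead abort the whole call with "build tag cannot contain a digest".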
+ } + if err := b.dispatch(i, n); err != nil { + if b.options.ForceRemove { + b.clearTmp() + } + return "", err + } + + shortImgID = stringid.TruncateID(b.image) + fmt.Fprintf(b.Stdout, " ---> %s\n", shortImgID) + if b.options.Remove { + b.clearTmp() + } + } + + // check if there are any leftover build-args that were passed but not + // consumed during build. Return an error, if there are any. + leftoverArgs := []string{} + for arg := range b.options.BuildArgs { + if !b.isBuildArgAllowed(arg) { + leftoverArgs = append(leftoverArgs, arg) + } + } + if len(leftoverArgs) > 0 { + return "", fmt.Errorf("One or more build-args %v were not consumed, failing build.", leftoverArgs) + } + + if b.image == "" { + return "", fmt.Errorf("No image was generated. Is your Dockerfile empty?") + } + + for _, rt := range repoAndTags { + if err := b.docker.TagImage(rt, b.image); err != nil { + return "", err + } + } + + fmt.Fprintf(b.Stdout, "Successfully built %s\n", shortImgID) + return b.image, nil +} + +// Cancel cancels an ongoing Dockerfile build. +func (b *Builder) Cancel() { + b.cancelOnce.Do(func() { + close(b.cancelled) + }) +} + +// BuildFromConfig builds directly from `changes`, treating it as if it were the contents of a Dockerfile +// It will: +// - Call parse.Parse() to get an AST root for the concatenated Dockerfile entries. +// - Do build by calling builder.dispatch() to call all entries' handling routines +// +// BuildFromConfig is used by the /commit endpoint, with the changes +// coming from the query parameter of the same name. +// +// TODO: Remove? +func BuildFromConfig(config *container.Config, changes []string) (*container.Config, error) { + ast, err := parser.Parse(bytes.NewBufferString(strings.Join(changes, "\n"))) + if err != nil { + return nil, err + } + + // ensure that the commands are valid + for _, n := range ast.Children { + if !validCommitCommands[n.Value] { + return nil, fmt.Errorf("%s is not a valid change command", n.Value) + } + } + + b, err := NewBuilder(context.Background(), nil, nil, nil, nil) + if err != nil { + return nil, err + } + b.runConfig = config + b.Stdout = ioutil.Discard + b.Stderr = ioutil.Discard + b.disableCommit = true + + for i, n := range ast.Children { + if err := b.dispatch(i, n); err != nil { + return nil, err + } + } + + return b.runConfig, nil +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/command/command.go b/vendor/github.com/docker/docker/builder/dockerfile/command/command.go new file mode 100644 index 00000000..9e1b799d --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/command/command.go @@ -0,0 +1,42 @@ +// Package command contains the set of Dockerfile commands. 
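
// Usage note for BuildFromConfig above (the /commit "changes" path): each
// change is treated as one Dockerfile line and must be one of
// validCommitCommands. An illustrative call, where oldConfig is assumed to be
// an existing *container.Config:
//
//	newConfig, err := BuildFromConfig(oldConfig, []string{
//		`ENV DEBUG=1`,
//		`CMD ["/bin/sh"]`,
//	})
//
// A change such as `RUN make` is rejected: "run is not a valid change command".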
+package command + +// Define constants for the command strings +const ( + Env = "env" + Label = "label" + Maintainer = "maintainer" + Add = "add" + Copy = "copy" + From = "from" + Onbuild = "onbuild" + Workdir = "workdir" + Run = "run" + Cmd = "cmd" + Entrypoint = "entrypoint" + Expose = "expose" + Volume = "volume" + User = "user" + StopSignal = "stopsignal" + Arg = "arg" +) + +// Commands is list of all Dockerfile commands +var Commands = map[string]struct{}{ + Env: {}, + Label: {}, + Maintainer: {}, + Add: {}, + Copy: {}, + From: {}, + Onbuild: {}, + Workdir: {}, + Run: {}, + Cmd: {}, + Entrypoint: {}, + Expose: {}, + Volume: {}, + User: {}, + StopSignal: {}, + Arg: {}, +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/dispatchers.go b/vendor/github.com/docker/docker/builder/dockerfile/dispatchers.go new file mode 100644 index 00000000..ac7c2b07 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/dispatchers.go @@ -0,0 +1,639 @@ +package dockerfile + +// This file contains the dispatchers for each command. Note that +// `nullDispatch` is not actually a command, but support for commands we parse +// but do nothing with. +// +// See evaluator.go for a higher level discussion of the whole evaluator +// package. + +import ( + "fmt" + "os" + "path/filepath" + "regexp" + "runtime" + "sort" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api" + "github.com/docker/docker/builder" + "github.com/docker/docker/pkg/signal" + "github.com/docker/docker/pkg/system" + runconfigopts "github.com/docker/docker/runconfig/opts" + "github.com/docker/engine-api/types/container" + "github.com/docker/engine-api/types/strslice" + "github.com/docker/go-connections/nat" +) + +// ENV foo bar +// +// Sets the environment variable foo to bar, also makes interpolation +// in the dockerfile available from the next statement on via ${foo}. +// +func env(b *Builder, args []string, attributes map[string]bool, original string) error { + if len(args) == 0 { + return errAtLeastOneArgument("ENV") + } + + if len(args)%2 != 0 { + // should never get here, but just in case + return errTooManyArguments("ENV") + } + + if err := b.flags.Parse(); err != nil { + return err + } + + // TODO/FIXME/NOT USED + // Just here to show how to use the builder flags stuff within the + // context of a builder command. Will remove once we actually add + // a builder command to something! + /* + flBool1 := b.flags.AddBool("bool1", false) + flStr1 := b.flags.AddString("str1", "HI") + + if err := b.flags.Parse(); err != nil { + return err + } + + fmt.Printf("Bool1:%v\n", flBool1) + fmt.Printf("Str1:%v\n", flStr1) + */ + + commitStr := "ENV" + + for j := 0; j < len(args); j++ { + // name ==> args[j] + // value ==> args[j+1] + newVar := args[j] + "=" + args[j+1] + "" + commitStr += " " + newVar + + gotOne := false + for i, envVar := range b.runConfig.Env { + envParts := strings.SplitN(envVar, "=", 2) + if envParts[0] == args[j] { + b.runConfig.Env[i] = newVar + gotOne = true + break + } + } + if !gotOne { + b.runConfig.Env = append(b.runConfig.Env, newVar) + } + j++ + } + + return b.commit("", b.runConfig.Cmd, commitStr) +} + +// MAINTAINER some text +// +// Sets the maintainer metadata. 
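
// A short trace of env() above: by the time it runs, the parser has already
// flattened `ENV A=1 B=2` into args = ["A", "1", "B", "2"], so the loop
// consumes two entries per variable, overwriting any existing A= entry of
// runConfig.Env in place, appending B=2, and committing as "ENV A=1 B=2".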
+func maintainer(b *Builder, args []string, attributes map[string]bool, original string) error { + if len(args) != 1 { + return errExactlyOneArgument("MAINTAINER") + } + + if err := b.flags.Parse(); err != nil { + return err + } + + b.maintainer = args[0] + return b.commit("", b.runConfig.Cmd, fmt.Sprintf("MAINTAINER %s", b.maintainer)) +} + +// LABEL some json data describing the image +// +// Sets the Label variable foo to bar, +// +func label(b *Builder, args []string, attributes map[string]bool, original string) error { + if len(args) == 0 { + return errAtLeastOneArgument("LABEL") + } + if len(args)%2 != 0 { + // should never get here, but just in case + return errTooManyArguments("LABEL") + } + + if err := b.flags.Parse(); err != nil { + return err + } + + commitStr := "LABEL" + + if b.runConfig.Labels == nil { + b.runConfig.Labels = map[string]string{} + } + + for j := 0; j < len(args); j++ { + // name ==> args[j] + // value ==> args[j+1] + newVar := args[j] + "=" + args[j+1] + "" + commitStr += " " + newVar + + b.runConfig.Labels[args[j]] = args[j+1] + j++ + } + return b.commit("", b.runConfig.Cmd, commitStr) +} + +// ADD foo /path +// +// Add the file 'foo' to '/path'. Tarball and Remote URL (git, http) handling +// exist here. If you do not wish to have this automatic handling, use COPY. +// +func add(b *Builder, args []string, attributes map[string]bool, original string) error { + if len(args) < 2 { + return errAtLeastOneArgument("ADD") + } + + if err := b.flags.Parse(); err != nil { + return err + } + + return b.runContextCommand(args, true, true, "ADD") +} + +// COPY foo /path +// +// Same as 'ADD' but without the tar and remote url handling. +// +func dispatchCopy(b *Builder, args []string, attributes map[string]bool, original string) error { + if len(args) < 2 { + return errAtLeastOneArgument("COPY") + } + + if err := b.flags.Parse(); err != nil { + return err + } + + return b.runContextCommand(args, false, false, "COPY") +} + +// FROM imagename +// +// This sets the image the dockerfile will build on top of. +// +func from(b *Builder, args []string, attributes map[string]bool, original string) error { + if len(args) != 1 { + return errExactlyOneArgument("FROM") + } + + if err := b.flags.Parse(); err != nil { + return err + } + + name := args[0] + + var ( + image builder.Image + err error + ) + + // Windows cannot support a container with no base image. + if name == api.NoBaseImageSpecifier { + if runtime.GOOS == "windows" { + return fmt.Errorf("Windows does not support FROM scratch") + } + b.image = "" + b.noBaseImage = true + } else { + // TODO: don't use `name`, instead resolve it to a digest + if !b.options.PullParent { + image, err = b.docker.GetImageOnBuild(name) + // TODO: shouldn't we error out if error is different from "not found" ? + } + if image == nil { + image, err = b.docker.PullOnBuild(b.clientCtx, name, b.options.AuthConfigs, b.Output) + if err != nil { + return err + } + } + } + + return b.processImageFrom(image) +} + +// ONBUILD RUN echo yo +// +// ONBUILD triggers run when the image is used in a FROM statement. +// +// ONBUILD handling has a lot of special-case functionality, the heading in +// evaluator.go and comments around dispatch() in the same file explain the +// special cases. search for 'OnBuild' in internals.go for additional special +// cases. 
+// +func onbuild(b *Builder, args []string, attributes map[string]bool, original string) error { + if len(args) == 0 { + return errAtLeastOneArgument("ONBUILD") + } + + if err := b.flags.Parse(); err != nil { + return err + } + + triggerInstruction := strings.ToUpper(strings.TrimSpace(args[0])) + switch triggerInstruction { + case "ONBUILD": + return fmt.Errorf("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed") + case "MAINTAINER", "FROM": + return fmt.Errorf("%s isn't allowed as an ONBUILD trigger", triggerInstruction) + } + + original = regexp.MustCompile(`(?i)^\s*ONBUILD\s*`).ReplaceAllString(original, "") + + b.runConfig.OnBuild = append(b.runConfig.OnBuild, original) + return b.commit("", b.runConfig.Cmd, fmt.Sprintf("ONBUILD %s", original)) +} + +// WORKDIR /tmp +// +// Set the working directory for future RUN/CMD/etc statements. +// +func workdir(b *Builder, args []string, attributes map[string]bool, original string) error { + if len(args) != 1 { + return errExactlyOneArgument("WORKDIR") + } + + if err := b.flags.Parse(); err != nil { + return err + } + + // This is from the Dockerfile and will not necessarily be in platform + // specific semantics, hence ensure it is converted. + workdir := filepath.FromSlash(args[0]) + + if !system.IsAbs(workdir) { + current := filepath.FromSlash(b.runConfig.WorkingDir) + workdir = filepath.Join(string(os.PathSeparator), current, workdir) + } + + b.runConfig.WorkingDir = workdir + + return b.commit("", b.runConfig.Cmd, fmt.Sprintf("WORKDIR %v", workdir)) +} + +// RUN some command yo +// +// run a command and commit the image. Args are automatically prepended with +// 'sh -c' under linux or 'cmd /S /C' under Windows, in the event there is +// only one argument. The difference in processing: +// +// RUN echo hi # sh -c echo hi (Linux) +// RUN echo hi # cmd /S /C echo hi (Windows) +// RUN [ "echo", "hi" ] # echo hi +// +func run(b *Builder, args []string, attributes map[string]bool, original string) error { + if b.image == "" && !b.noBaseImage { + return fmt.Errorf("Please provide a source image with `from` prior to run") + } + + if err := b.flags.Parse(); err != nil { + return err + } + + args = handleJSONArgs(args, attributes) + + if !attributes["json"] { + if runtime.GOOS != "windows" { + args = append([]string{"/bin/sh", "-c"}, args...) + } else { + args = append([]string{"cmd", "/S", "/C"}, args...) + } + } + + config := &container.Config{ + Cmd: strslice.StrSlice(args), + Image: b.image, + } + + // stash the cmd + cmd := b.runConfig.Cmd + if len(b.runConfig.Entrypoint) == 0 && len(b.runConfig.Cmd) == 0 { + b.runConfig.Cmd = config.Cmd + } + + // stash the config environment + env := b.runConfig.Env + + defer func(cmd strslice.StrSlice) { b.runConfig.Cmd = cmd }(cmd) + defer func(env []string) { b.runConfig.Env = env }(env) + + // derive the net build-time environment for this run. We let config + // environment override the build time environment. + // This means that we take the b.buildArgs list of env vars and remove + // any of those variables that are defined as part of the container. In other + // words, anything in b.Config.Env. What's left is the list of build-time env + // vars that we need to add to each RUN command - note the list could be empty. + // + // We don't persist the build time environment with container's config + // environment, but just sort and prepend it to the command string at time + // of commit. 
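
// Worked example of the workdir() rule above, under Linux path semantics:
// after `WORKDIR /a`, a later `WORKDIR b` is relative, so it is joined as
// filepath.Join("/", "/a", "b") = "/a/b", which becomes the new
// runConfig.WorkingDir and is committed as "WORKDIR /a/b".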
+ // This helps with tracing back the image's actual environment at the time + // of RUN, without leaking it to the final image. It also aids cache + // lookup for same image built with same build time environment. + cmdBuildEnv := []string{} + configEnv := runconfigopts.ConvertKVStringsToMap(b.runConfig.Env) + for key, val := range b.options.BuildArgs { + if !b.isBuildArgAllowed(key) { + // skip build-args that are not in allowed list, meaning they have + // not been defined by an "ARG" Dockerfile command yet. + // This is an error condition but only if there is no "ARG" in the entire + // Dockerfile, so we'll generate any necessary errors after we parsed + // the entire file (see 'leftoverArgs' processing in evaluator.go ) + continue + } + if _, ok := configEnv[key]; !ok { + cmdBuildEnv = append(cmdBuildEnv, fmt.Sprintf("%s=%s", key, val)) + } + } + + // derive the command to use for probeCache() and to commit in this container. + // Note that we only do this if there are any build-time env vars. Also, we + // use the special argument "|#" at the start of the args array. This will + // avoid conflicts with any RUN command since commands can not + // start with | (vertical bar). The "#" (number of build envs) is there to + // help ensure proper cache matches. We don't want a RUN command + // that starts with "foo=abc" to be considered part of a build-time env var. + saveCmd := config.Cmd + if len(cmdBuildEnv) > 0 { + sort.Strings(cmdBuildEnv) + tmpEnv := append([]string{fmt.Sprintf("|%d", len(cmdBuildEnv))}, cmdBuildEnv...) + saveCmd = strslice.StrSlice(append(tmpEnv, saveCmd...)) + } + + b.runConfig.Cmd = saveCmd + hit, err := b.probeCache() + if err != nil { + return err + } + if hit { + return nil + } + + // set Cmd manually, this is special case only for Dockerfiles + b.runConfig.Cmd = config.Cmd + // set build-time environment for 'run'. + b.runConfig.Env = append(b.runConfig.Env, cmdBuildEnv...) + // set config as already being escaped, this prevents double escaping on windows + b.runConfig.ArgsEscaped = true + + logrus.Debugf("[BUILDER] Command to be executed: %v", b.runConfig.Cmd) + + cID, err := b.create() + if err != nil { + return err + } + + if err := b.run(cID); err != nil { + return err + } + + // revert to original config environment and set the command string to + // have the build-time env vars in it (if any) so that future cache look-ups + // properly match it. + b.runConfig.Env = env + b.runConfig.Cmd = saveCmd + return b.commit(cID, cmd, "run") +} + +// CMD foo +// +// Set the default command to run in the container (which may be empty). +// Argument handling is the same as RUN. +// +func cmd(b *Builder, args []string, attributes map[string]bool, original string) error { + if err := b.flags.Parse(); err != nil { + return err + } + + cmdSlice := handleJSONArgs(args, attributes) + + if !attributes["json"] { + if runtime.GOOS != "windows" { + cmdSlice = append([]string{"/bin/sh", "-c"}, cmdSlice...) + } else { + cmdSlice = append([]string{"cmd", "/S", "/C"}, cmdSlice...) + } + } + + b.runConfig.Cmd = strslice.StrSlice(cmdSlice) + + if err := b.commit("", b.runConfig.Cmd, fmt.Sprintf("CMD %q", cmdSlice)); err != nil { + return err + } + + if len(args) != 0 { + b.cmdSet = true + } + + return nil +} + +// ENTRYPOINT /usr/sbin/nginx +// +// Set the entrypoint (which defaults to sh -c on linux, or cmd /S /C on Windows) to +// /usr/sbin/nginx. Will accept the CMD as the arguments to /usr/sbin/nginx. 
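
// Concrete shape of the "|#" cache-key encoding described in run() above,
// assuming `ARG FOO` plus --build-arg FOO=bar and the instruction `RUN make`:
//
//	probe/commit command: ["|1", "FOO=bar", "/bin/sh", "-c", "make"]
//	executed command:     ["/bin/sh", "-c", "make"]   (FOO=bar in the env)
//
// A real RUN can never start with "|", and the count stops a command whose
// text merely begins with "FOO=bar" from colliding with the encoded form.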
+// +// Handles command processing similar to CMD and RUN, only b.runConfig.Entrypoint +// is initialized at NewBuilder time instead of through argument parsing. +// +func entrypoint(b *Builder, args []string, attributes map[string]bool, original string) error { + if err := b.flags.Parse(); err != nil { + return err + } + + parsed := handleJSONArgs(args, attributes) + + switch { + case attributes["json"]: + // ENTRYPOINT ["echo", "hi"] + b.runConfig.Entrypoint = strslice.StrSlice(parsed) + case len(parsed) == 0: + // ENTRYPOINT [] + b.runConfig.Entrypoint = nil + default: + // ENTRYPOINT echo hi + if runtime.GOOS != "windows" { + b.runConfig.Entrypoint = strslice.StrSlice{"/bin/sh", "-c", parsed[0]} + } else { + b.runConfig.Entrypoint = strslice.StrSlice{"cmd", "/S", "/C", parsed[0]} + } + } + + // when setting the entrypoint if a CMD was not explicitly set then + // set the command to nil + if !b.cmdSet { + b.runConfig.Cmd = nil + } + + if err := b.commit("", b.runConfig.Cmd, fmt.Sprintf("ENTRYPOINT %q", b.runConfig.Entrypoint)); err != nil { + return err + } + + return nil +} + +// EXPOSE 6667/tcp 7000/tcp +// +// Expose ports for links and port mappings. This all ends up in +// b.runConfig.ExposedPorts for runconfig. +// +func expose(b *Builder, args []string, attributes map[string]bool, original string) error { + portsTab := args + + if len(args) == 0 { + return errAtLeastOneArgument("EXPOSE") + } + + if err := b.flags.Parse(); err != nil { + return err + } + + if b.runConfig.ExposedPorts == nil { + b.runConfig.ExposedPorts = make(nat.PortSet) + } + + ports, _, err := nat.ParsePortSpecs(portsTab) + if err != nil { + return err + } + + // instead of using ports directly, we build a list of ports and sort it so + // the order is consistent. This prevents cache burst where map ordering + // changes between builds + portList := make([]string, len(ports)) + var i int + for port := range ports { + if _, exists := b.runConfig.ExposedPorts[port]; !exists { + b.runConfig.ExposedPorts[port] = struct{}{} + } + portList[i] = string(port) + i++ + } + sort.Strings(portList) + return b.commit("", b.runConfig.Cmd, fmt.Sprintf("EXPOSE %s", strings.Join(portList, " "))) +} + +// USER foo +// +// Set the user to 'foo' for future commands and when running the +// ENTRYPOINT/CMD at container run time. +// +func user(b *Builder, args []string, attributes map[string]bool, original string) error { + if len(args) != 1 { + return errExactlyOneArgument("USER") + } + + if err := b.flags.Parse(); err != nil { + return err + } + + b.runConfig.User = args[0] + return b.commit("", b.runConfig.Cmd, fmt.Sprintf("USER %v", args)) +} + +// VOLUME /foo +// +// Expose the volume /foo for use. Will also accept the JSON array form. +// +func volume(b *Builder, args []string, attributes map[string]bool, original string) error { + if len(args) == 0 { + return errAtLeastOneArgument("VOLUME") + } + + if err := b.flags.Parse(); err != nil { + return err + } + + if b.runConfig.Volumes == nil { + b.runConfig.Volumes = map[string]struct{}{} + } + for _, v := range args { + v = strings.TrimSpace(v) + if v == "" { + return fmt.Errorf("Volume specified can not be an empty string") + } + b.runConfig.Volumes[v] = struct{}{} + } + if err := b.commit("", b.runConfig.Cmd, fmt.Sprintf("VOLUME %v", args)); err != nil { + return err + } + return nil +} + +// STOPSIGNAL signal +// +// Set the signal that will be used to kill the container. 
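
// One consequence of entrypoint() above worth calling out: if no CMD has been
// declared in this Dockerfile (b.cmdSet is false), setting an ENTRYPOINT nils
// out runConfig.Cmd, so a CMD inherited from the base image does not silently
// become arguments to the new entrypoint. Illustratively:
//
//	FROM base            # suppose base carries CMD ["serve"]
//	ENTRYPOINT ["app"]   # Cmd is reset to nil; the container runs just `app`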
+func stopSignal(b *Builder, args []string, attributes map[string]bool, original string) error { + if len(args) != 1 { + return fmt.Errorf("STOPSIGNAL requires exactly one argument") + } + + sig := args[0] + _, err := signal.ParseSignal(sig) + if err != nil { + return err + } + + b.runConfig.StopSignal = sig + return b.commit("", b.runConfig.Cmd, fmt.Sprintf("STOPSIGNAL %v", args)) +} + +// ARG name[=value] +// +// Adds the variable foo to the trusted list of variables that can be passed +// to builder using the --build-arg flag for expansion/subsitution or passing to 'run'. +// Dockerfile author may optionally set a default value of this variable. +func arg(b *Builder, args []string, attributes map[string]bool, original string) error { + if len(args) != 1 { + return fmt.Errorf("ARG requires exactly one argument definition") + } + + var ( + name string + value string + hasDefault bool + ) + + arg := args[0] + // 'arg' can just be a name or name-value pair. Note that this is different + // from 'env' that handles the split of name and value at the parser level. + // The reason for doing it differently for 'arg' is that we support just + // defining an arg and not assign it a value (while 'env' always expects a + // name-value pair). If possible, it will be good to harmonize the two. + if strings.Contains(arg, "=") { + parts := strings.SplitN(arg, "=", 2) + name = parts[0] + value = parts[1] + hasDefault = true + } else { + name = arg + hasDefault = false + } + // add the arg to allowed list of build-time args from this step on. + b.allowedBuildArgs[name] = true + + // If there is a default value associated with this arg then add it to the + // b.buildArgs if one is not already passed to the builder. The args passed + // to builder override the default value of 'arg'. + if _, ok := b.options.BuildArgs[name]; !ok && hasDefault { + b.options.BuildArgs[name] = value + } + + return b.commit("", b.runConfig.Cmd, fmt.Sprintf("ARG %s", arg)) +} + +func errAtLeastOneArgument(command string) error { + return fmt.Errorf("%s requires at least one argument", command) +} + +func errExactlyOneArgument(command string) error { + return fmt.Errorf("%s requires exactly one argument", command) +} + +func errTooManyArguments(command string) error { + return fmt.Errorf("Bad input to %s, too many arguments", command) +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/envVarTest b/vendor/github.com/docker/docker/builder/dockerfile/envVarTest new file mode 100644 index 00000000..1a7fe975 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/envVarTest @@ -0,0 +1,112 @@ +hello | hello +he'll'o | hello +he'llo | hello +he\'llo | he'llo +he\\'llo | he\llo +abc\tdef | abctdef +"abc\tdef" | abc\tdef +'abc\tdef' | abc\tdef +hello\ | hello +hello\\ | hello\ +"hello | hello +"hello\" | hello" +"hel'lo" | hel'lo +'hello | hello +'hello\' | hello\ +"''" | '' +$. | $. +$1 | +he$1x | hex +he$.x | he$.x +he$pwd. | he. 
+he$PWD | he/home +he\$PWD | he$PWD +he\\$PWD | he\/home +he\${} | he${} +he\${}xx | he${}xx +he${} | he +he${}xx | hexx +he${hi} | he +he${hi}xx | hexx +he${PWD} | he/home +he${.} | error +he${XXX:-000}xx | he000xx +he${PWD:-000}xx | he/homexx +he${XXX:-$PWD}xx | he/homexx +he${XXX:-${PWD:-yyy}}xx | he/homexx +he${XXX:-${YYY:-yyy}}xx | heyyyxx +he${XXX:YYY} | error +he${XXX:+${PWD}}xx | hexx +he${PWD:+${XXX}}xx | hexx +he${PWD:+${SHELL}}xx | hebashxx +he${XXX:+000}xx | hexx +he${PWD:+000}xx | he000xx +'he${XX}' | he${XX} +"he${PWD}" | he/home +"he'$PWD'" | he'/home' +"$PWD" | /home +'$PWD' | $PWD +'\$PWD' | \$PWD +'"hello"' | "hello" +he\$PWD | he$PWD +"he\$PWD" | he$PWD +'he\$PWD' | he\$PWD +he${PWD | error +he${PWD:=000}xx | error +he${PWD:+${PWD}:}xx | he/home:xx +he${XXX:-\$PWD:}xx | he$PWD:xx +he${XXX:-\${PWD}z}xx | he${PWDz}xx +안녕하세요 | 안녕하세요 +안'녕'하세요 | 안녕하세요 +안'녕하세요 | 안녕하세요 +안녕\'하세요 | 안녕'하세요 +안\\'녕하세요 | 안\녕하세요 +안녕\t하세요 | 안녕t하세요 +"안녕\t하세요" | 안녕\t하세요 +'안녕\t하세요 | 안녕\t하세요 +안녕하세요\ | 안녕하세요 +안녕하세요\\ | 안녕하세요\ +"안녕하세요 | 안녕하세요 +"안녕하세요\" | 안녕하세요" +"안녕'하세요" | 안녕'하세요 +'안녕하세요 | 안녕하세요 +'안녕하세요\' | 안녕하세요\ +안녕$1x | 안녕x +안녕$.x | 안녕$.x +안녕$pwd. | 안녕. +안녕$PWD | 안녕/home +안녕\$PWD | 안녕$PWD +안녕\\$PWD | 안녕\/home +안녕\${} | 안녕${} +안녕\${}xx | 안녕${}xx +안녕${} | 안녕 +안녕${}xx | 안녕xx +안녕${hi} | 안녕 +안녕${hi}xx | 안녕xx +안녕${PWD} | 안녕/home +안녕${.} | error +안녕${XXX:-000}xx | 안녕000xx +안녕${PWD:-000}xx | 안녕/homexx +안녕${XXX:-$PWD}xx | 안녕/homexx +안녕${XXX:-${PWD:-yyy}}xx | 안녕/homexx +안녕${XXX:-${YYY:-yyy}}xx | 안녕yyyxx +안녕${XXX:YYY} | error +안녕${XXX:+${PWD}}xx | 안녕xx +안녕${PWD:+${XXX}}xx | 안녕xx +안녕${PWD:+${SHELL}}xx | 안녕bashxx +안녕${XXX:+000}xx | 안녕xx +안녕${PWD:+000}xx | 안녕000xx +'안녕${XX}' | 안녕${XX} +"안녕${PWD}" | 안녕/home +"안녕'$PWD'" | 안녕'/home' +'"안녕"' | "안녕" +안녕\$PWD | 안녕$PWD +"안녕\$PWD" | 안녕$PWD +'안녕\$PWD' | 안녕\$PWD +안녕${PWD | error +안녕${PWD:=000}xx | error +안녕${PWD:+${PWD}:}xx | 안녕/home:xx +안녕${XXX:-\$PWD:}xx | 안녕$PWD:xx +안녕${XXX:-\${PWD}z}xx | 안녕${PWDz}xx +$KOREAN | 한국어 +안녕$KOREAN | 안녕한국어 diff --git a/vendor/github.com/docker/docker/builder/dockerfile/evaluator.go b/vendor/github.com/docker/docker/builder/dockerfile/evaluator.go new file mode 100644 index 00000000..270e3a4f --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/evaluator.go @@ -0,0 +1,215 @@ +// Package dockerfile is the evaluation step in the Dockerfile parse/evaluate pipeline. +// +// It incorporates a dispatch table based on the parser.Node values (see the +// parser package for more information) that are yielded from the parser itself. +// Calling NewBuilder with the BuildOpts struct can be used to customize the +// experience for execution purposes only. Parsing is controlled in the parser +// package, and this division of responsibility should be respected. +// +// Please see the jump table targets for the actual invocations, most of which +// will call out to the functions in internals.go to deal with their tasks. +// +// ONBUILD is a special case, which is covered in the onbuild() func in +// dispatchers.go. +// +// The evaluator uses the concept of "steps", which are usually each processable +// line in the Dockerfile. Each step is numbered and certain actions are taken +// before and after each step, such as creating an image ID and removing temporary +// containers and images. Note that ONBUILD creates a kinda-sorta "sub run" which +// includes its own set of steps (usually only one of them). 
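
// The evaluator below is driven one statement at a time; the caller's loop is
// the one already shown in builder.go's build() above, essentially:
//
//	for i, n := range b.dockerfile.Children {
//		if err := b.dispatch(i, n); err != nil {
//			return "", err
//		}
//	}
//
// so everything in this file describes what happens for a single step.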
+package dockerfile + +import ( + "fmt" + "runtime" + "strings" + + "github.com/docker/docker/builder/dockerfile/command" + "github.com/docker/docker/builder/dockerfile/parser" +) + +// Environment variable interpolation will happen on these statements only. +var replaceEnvAllowed = map[string]bool{ + command.Env: true, + command.Label: true, + command.Add: true, + command.Copy: true, + command.Workdir: true, + command.Expose: true, + command.Volume: true, + command.User: true, + command.StopSignal: true, + command.Arg: true, +} + +// Certain commands are allowed to have their args split into more +// words after env var replacements. Meaning: +// ENV foo="123 456" +// EXPOSE $foo +// should result in the same thing as: +// EXPOSE 123 456 +// and not treat "123 456" as a single word. +// Note that: EXPOSE "$foo" and EXPOSE $foo are not the same thing. +// Quotes will cause it to still be treated as single word. +var allowWordExpansion = map[string]bool{ + command.Expose: true, +} + +var evaluateTable map[string]func(*Builder, []string, map[string]bool, string) error + +func init() { + evaluateTable = map[string]func(*Builder, []string, map[string]bool, string) error{ + command.Env: env, + command.Label: label, + command.Maintainer: maintainer, + command.Add: add, + command.Copy: dispatchCopy, // copy() is a go builtin + command.From: from, + command.Onbuild: onbuild, + command.Workdir: workdir, + command.Run: run, + command.Cmd: cmd, + command.Entrypoint: entrypoint, + command.Expose: expose, + command.Volume: volume, + command.User: user, + command.StopSignal: stopSignal, + command.Arg: arg, + } +} + +// This method is the entrypoint to all statement handling routines. +// +// Almost all nodes will have this structure: +// Child[Node, Node, Node] where Child is from parser.Node.Children and each +// node comes from parser.Node.Next. This forms a "line" with a statement and +// arguments and we process them in this normalized form by hitting +// evaluateTable with the leaf nodes of the command and the Builder object. +// +// ONBUILD is a special case; in this case the parser will emit: +// Child[Node, Child[Node, Node...]] where the first node is the literal +// "onbuild" and the child entrypoint is the command of the ONBUILD statement, +// such as `RUN` in ONBUILD RUN foo. There is special case logic in here to +// deal with that, at least until it becomes more of a general concern with new +// features. +func (b *Builder) dispatch(stepN int, ast *parser.Node) error { + cmd := ast.Value + upperCasedCmd := strings.ToUpper(cmd) + + // To ensure the user is given a decent error message if the platform + // on which the daemon is running does not support a builder command. + if err := platformSupports(strings.ToLower(cmd)); err != nil { + return err + } + + attrs := ast.Attributes + original := ast.Original + flags := ast.Flags + strList := []string{} + msg := fmt.Sprintf("Step %d : %s", stepN+1, upperCasedCmd) + + if len(ast.Flags) > 0 { + msg += " " + strings.Join(ast.Flags, " ") + } + + if cmd == "onbuild" { + if ast.Next == nil { + return fmt.Errorf("ONBUILD requires at least one argument") + } + ast = ast.Next.Children[0] + strList = append(strList, ast.Value) + msg += " " + ast.Value + + if len(ast.Flags) > 0 { + msg += " " + strings.Join(ast.Flags, " ") + } + + } + + // count the number of nodes that we are going to traverse first + // so we can pre-create the argument and message array. 
This speeds up the + // allocation of those list a lot when they have a lot of arguments + cursor := ast + var n int + for cursor.Next != nil { + cursor = cursor.Next + n++ + } + msgList := make([]string, n) + + var i int + // Append the build-time args to config-environment. + // This allows builder config to override the variables, making the behavior similar to + // a shell script i.e. `ENV foo bar` overrides value of `foo` passed in build + // context. But `ENV foo $foo` will use the value from build context if one + // isn't already been defined by a previous ENV primitive. + // Note, we get this behavior because we know that ProcessWord() will + // stop on the first occurrence of a variable name and not notice + // a subsequent one. So, putting the buildArgs list after the Config.Env + // list, in 'envs', is safe. + envs := b.runConfig.Env + for key, val := range b.options.BuildArgs { + if !b.isBuildArgAllowed(key) { + // skip build-args that are not in allowed list, meaning they have + // not been defined by an "ARG" Dockerfile command yet. + // This is an error condition but only if there is no "ARG" in the entire + // Dockerfile, so we'll generate any necessary errors after we parsed + // the entire file (see 'leftoverArgs' processing in evaluator.go ) + continue + } + envs = append(envs, fmt.Sprintf("%s=%s", key, val)) + } + for ast.Next != nil { + ast = ast.Next + var str string + str = ast.Value + if replaceEnvAllowed[cmd] { + var err error + var words []string + + if allowWordExpansion[cmd] { + words, err = ProcessWords(str, envs) + if err != nil { + return err + } + strList = append(strList, words...) + } else { + str, err = ProcessWord(str, envs) + if err != nil { + return err + } + strList = append(strList, str) + } + } else { + strList = append(strList, str) + } + msgList[i] = ast.Value + i++ + } + + msg += " " + strings.Join(msgList, " ") + fmt.Fprintln(b.Stdout, msg) + + // XXX yes, we skip any cmds that are not valid; the parser should have + // picked these out already. + if f, ok := evaluateTable[cmd]; ok { + b.flags = NewBFlags() + b.flags.Args = flags + return f(b, strList, attrs, original) + } + + return fmt.Errorf("Unknown instruction: %s", upperCasedCmd) +} + +// platformSupports is a short-term function to give users a quality error +// message if a Dockerfile uses a command not supported on the platform. +func platformSupports(command string) error { + if runtime.GOOS != "windows" { + return nil + } + switch command { + case "expose", "user", "stopsignal", "arg": + return fmt.Errorf("The daemon on this platform does not support the command '%s'", command) + } + return nil +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/internals.go b/vendor/github.com/docker/docker/builder/dockerfile/internals.go new file mode 100644 index 00000000..a9be9fdd --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/internals.go @@ -0,0 +1,662 @@ +package dockerfile + +// internals for handling commands. Covers many areas and a lot of +// non-contiguous functionality. Please read the comments. 
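
// A worked example of the substitution order set up in dispatch() above: with
// --build-arg foo=fromArg and a Dockerfile line `ENV foo bar`, the lookup
// list is envs = [..., "foo=bar", ..., "foo=fromArg"]. ProcessWord stops at
// the first match, so a `$foo` after the ENV line expands to "bar" (the ENV
// wins), while a `$foo` appearing before the ENV line still sees "fromArg".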
+ +import ( + "crypto/sha256" + "encoding/hex" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "os" + "path/filepath" + "runtime" + "sort" + "strings" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/builder" + "github.com/docker/docker/builder/dockerfile/parser" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/httputils" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/streamformatter" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/pkg/tarsum" + "github.com/docker/docker/pkg/urlutil" + "github.com/docker/docker/runconfig/opts" + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/container" + "github.com/docker/engine-api/types/strslice" +) + +func (b *Builder) commit(id string, autoCmd strslice.StrSlice, comment string) error { + if b.disableCommit { + return nil + } + if b.image == "" && !b.noBaseImage { + return fmt.Errorf("Please provide a source image with `from` prior to commit") + } + b.runConfig.Image = b.image + + if id == "" { + cmd := b.runConfig.Cmd + if runtime.GOOS != "windows" { + b.runConfig.Cmd = strslice.StrSlice{"/bin/sh", "-c", "#(nop) " + comment} + } else { + b.runConfig.Cmd = strslice.StrSlice{"cmd", "/S /C", "REM (nop) " + comment} + } + defer func(cmd strslice.StrSlice) { b.runConfig.Cmd = cmd }(cmd) + + hit, err := b.probeCache() + if err != nil { + return err + } else if hit { + return nil + } + id, err = b.create() + if err != nil { + return err + } + } + + // Note: Actually copy the struct + autoConfig := *b.runConfig + autoConfig.Cmd = autoCmd + + commitCfg := &types.ContainerCommitConfig{ + Author: b.maintainer, + Pause: true, + Config: &autoConfig, + } + + // Commit the container + imageID, err := b.docker.Commit(id, commitCfg) + if err != nil { + return err + } + + b.image = imageID + return nil +} + +type copyInfo struct { + builder.FileInfo + decompress bool +} + +func (b *Builder) runContextCommand(args []string, allowRemote bool, allowLocalDecompression bool, cmdName string) error { + if b.context == nil { + return fmt.Errorf("No context given. Impossible to use %s", cmdName) + } + + if len(args) < 2 { + return fmt.Errorf("Invalid %s format - at least two arguments required", cmdName) + } + + // Work in daemon-specific filepath semantics + dest := filepath.FromSlash(args[len(args)-1]) // last one is always the dest + + b.runConfig.Image = b.image + + var infos []copyInfo + + // Loop through each src file and calculate the info we need to + // do the copy (e.g. hash value if cached). Don't actually do + // the copy until we've looked at all src files + var err error + for _, orig := range args[0 : len(args)-1] { + var fi builder.FileInfo + decompress := allowLocalDecompression + if urlutil.IsURL(orig) { + if !allowRemote { + return fmt.Errorf("Source can't be a URL for %s", cmdName) + } + fi, err = b.download(orig) + if err != nil { + return err + } + defer os.RemoveAll(filepath.Dir(fi.Path())) + decompress = false + infos = append(infos, copyInfo{fi, decompress}) + continue + } + // not a URL + subInfos, err := b.calcCopyInfo(cmdName, orig, allowLocalDecompression, true) + if err != nil { + return err + } + + infos = append(infos, subInfos...) 
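
// Aside on commit() above: the "#(nop)" marker gives metadata-only
// instructions a distinct cache identity without executing anything. For
// `MAINTAINER jane`, the probed and committed command is effectively
// /bin/sh -c '#(nop) MAINTAINER jane', so two images differing only in such
// metadata never share a cached layer.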
+ } + + if len(infos) == 0 { + return fmt.Errorf("No source files were specified") + } + if len(infos) > 1 && !strings.HasSuffix(dest, string(os.PathSeparator)) { + return fmt.Errorf("When using %s with more than one source file, the destination must be a directory and end with a /", cmdName) + } + + // For backwards compat, if there's just one info then use it as the + // cache look-up string, otherwise hash 'em all into one + var srcHash string + var origPaths string + + if len(infos) == 1 { + fi := infos[0].FileInfo + origPaths = fi.Name() + if hfi, ok := fi.(builder.Hashed); ok { + srcHash = hfi.Hash() + } + } else { + var hashs []string + var origs []string + for _, info := range infos { + fi := info.FileInfo + origs = append(origs, fi.Name()) + if hfi, ok := fi.(builder.Hashed); ok { + hashs = append(hashs, hfi.Hash()) + } + } + hasher := sha256.New() + hasher.Write([]byte(strings.Join(hashs, ","))) + srcHash = "multi:" + hex.EncodeToString(hasher.Sum(nil)) + origPaths = strings.Join(origs, " ") + } + + cmd := b.runConfig.Cmd + if runtime.GOOS != "windows" { + b.runConfig.Cmd = strslice.StrSlice{"/bin/sh", "-c", fmt.Sprintf("#(nop) %s %s in %s", cmdName, srcHash, dest)} + } else { + b.runConfig.Cmd = strslice.StrSlice{"cmd", "/S", "/C", fmt.Sprintf("REM (nop) %s %s in %s", cmdName, srcHash, dest)} + } + defer func(cmd strslice.StrSlice) { b.runConfig.Cmd = cmd }(cmd) + + if hit, err := b.probeCache(); err != nil { + return err + } else if hit { + return nil + } + + container, err := b.docker.ContainerCreate(types.ContainerCreateConfig{Config: b.runConfig}) + if err != nil { + return err + } + b.tmpContainers[container.ID] = struct{}{} + + comment := fmt.Sprintf("%s %s in %s", cmdName, origPaths, dest) + + // Twiddle the destination when its a relative path - meaning, make it + // relative to the WORKINGDIR + if !system.IsAbs(dest) { + hasSlash := strings.HasSuffix(dest, string(os.PathSeparator)) + dest = filepath.Join(string(os.PathSeparator), filepath.FromSlash(b.runConfig.WorkingDir), dest) + + // Make sure we preserve any trailing slash + if hasSlash { + dest += string(os.PathSeparator) + } + } + + for _, info := range infos { + if err := b.docker.CopyOnBuild(container.ID, dest, info.FileInfo, info.decompress); err != nil { + return err + } + } + + return b.commit(container.ID, cmd, comment) +} + +func (b *Builder) download(srcURL string) (fi builder.FileInfo, err error) { + // get filename from URL + u, err := url.Parse(srcURL) + if err != nil { + return + } + path := filepath.FromSlash(u.Path) // Ensure in platform semantics + if strings.HasSuffix(path, string(os.PathSeparator)) { + path = path[:len(path)-1] + } + parts := strings.Split(path, string(os.PathSeparator)) + filename := parts[len(parts)-1] + if filename == "" { + err = fmt.Errorf("cannot determine filename from url: %s", u) + return + } + + // Initiate the download + resp, err := httputils.Download(srcURL) + if err != nil { + return + } + + // Prepare file in a tmp dir + tmpDir, err := ioutils.TempDir("", "docker-remote") + if err != nil { + return + } + defer func() { + if err != nil { + os.RemoveAll(tmpDir) + } + }() + tmpFileName := filepath.Join(tmpDir, filename) + tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600) + if err != nil { + return + } + + stdoutFormatter := b.Stdout.(*streamformatter.StdoutFormatter) + progressOutput := stdoutFormatter.StreamFormatter.NewProgressOutput(stdoutFormatter.Writer, true) + progressReader := progress.NewProgressReader(resp.Body, progressOutput, 
resp.ContentLength, "", "Downloading") + // Download and dump result to tmp file + if _, err = io.Copy(tmpFile, progressReader); err != nil { + tmpFile.Close() + return + } + fmt.Fprintln(b.Stdout) + // ignoring error because the file was already opened successfully + tmpFileSt, err := tmpFile.Stat() + if err != nil { + return + } + tmpFile.Close() + + // Set the mtime to the Last-Modified header value if present + // Otherwise just remove atime and mtime + mTime := time.Time{} + + lastMod := resp.Header.Get("Last-Modified") + if lastMod != "" { + // If we can't parse it then just let it default to 'zero' + // otherwise use the parsed time value + if parsedMTime, err := http.ParseTime(lastMod); err == nil { + mTime = parsedMTime + } + } + + if err = system.Chtimes(tmpFileName, mTime, mTime); err != nil { + return + } + + // Calc the checksum, even if we're using the cache + r, err := archive.Tar(tmpFileName, archive.Uncompressed) + if err != nil { + return + } + tarSum, err := tarsum.NewTarSum(r, true, tarsum.Version1) + if err != nil { + return + } + if _, err = io.Copy(ioutil.Discard, tarSum); err != nil { + return + } + hash := tarSum.Sum(nil) + r.Close() + return &builder.HashedFileInfo{FileInfo: builder.PathFileInfo{FileInfo: tmpFileSt, FilePath: tmpFileName}, FileHash: hash}, nil +} + +func (b *Builder) calcCopyInfo(cmdName, origPath string, allowLocalDecompression, allowWildcards bool) ([]copyInfo, error) { + + // Work in daemon-specific OS filepath semantics + origPath = filepath.FromSlash(origPath) + + if origPath != "" && origPath[0] == os.PathSeparator && len(origPath) > 1 { + origPath = origPath[1:] + } + origPath = strings.TrimPrefix(origPath, "."+string(os.PathSeparator)) + + // Deal with wildcards + if allowWildcards && containsWildcards(origPath) { + var copyInfos []copyInfo + if err := b.context.Walk("", func(path string, info builder.FileInfo, err error) error { + if err != nil { + return err + } + if info.Name() == "" { + // Why are we doing this check? + return nil + } + if match, _ := filepath.Match(origPath, path); !match { + return nil + } + + // Note we set allowWildcards to false in case the name has + // a * in it + subInfos, err := b.calcCopyInfo(cmdName, path, allowLocalDecompression, false) + if err != nil { + return err + } + copyInfos = append(copyInfos, subInfos...) + return nil + }); err != nil { + return nil, err + } + return copyInfos, nil + } + + // Must be a dir or a file + + statPath, fi, err := b.context.Stat(origPath) + if err != nil { + return nil, err + } + + copyInfos := []copyInfo{{FileInfo: fi, decompress: allowLocalDecompression}} + + hfi, handleHash := fi.(builder.Hashed) + if !handleHash { + return copyInfos, nil + } + + // Deal with the single file case + if !fi.IsDir() { + hfi.SetHash("file:" + hfi.Hash()) + return copyInfos, nil + } + // Must be a dir + var subfiles []string + err = b.context.Walk(statPath, func(path string, info builder.FileInfo, err error) error { + if err != nil { + return err + } + // we already checked handleHash above + subfiles = append(subfiles, info.(builder.Hashed).Hash()) + return nil + }) + if err != nil { + return nil, err + } + + sort.Strings(subfiles) + hasher := sha256.New() + hasher.Write([]byte(strings.Join(subfiles, ","))) + hfi.SetHash("dir:" + hex.EncodeToString(hasher.Sum(nil))) + + return copyInfos, nil +} + +func containsWildcards(name string) bool { + for i := 0; i < len(name); i++ { + ch := name[i] + if ch == '\\' { + i++ + } else if ch == '*' || ch == '?' 
|| ch == '[' { + return true + } + } + return false +} + +func (b *Builder) processImageFrom(img builder.Image) error { + if img != nil { + b.image = img.ImageID() + + if img.RunConfig() != nil { + b.runConfig = img.RunConfig() + } + } + + // Check to see if we have a default PATH, note that windows won't + // have one as its set by HCS + if system.DefaultPathEnv != "" { + // Convert the slice of strings that represent the current list + // of env vars into a map so we can see if PATH is already set. + // If its not set then go ahead and give it our default value + configEnv := opts.ConvertKVStringsToMap(b.runConfig.Env) + if _, ok := configEnv["PATH"]; !ok { + b.runConfig.Env = append(b.runConfig.Env, + "PATH="+system.DefaultPathEnv) + } + } + + if img == nil { + // Typically this means they used "FROM scratch" + return nil + } + + // Process ONBUILD triggers if they exist + if nTriggers := len(b.runConfig.OnBuild); nTriggers != 0 { + word := "trigger" + if nTriggers > 1 { + word = "triggers" + } + fmt.Fprintf(b.Stderr, "# Executing %d build %s...\n", nTriggers, word) + } + + // Copy the ONBUILD triggers, and remove them from the config, since the config will be committed. + onBuildTriggers := b.runConfig.OnBuild + b.runConfig.OnBuild = []string{} + + // parse the ONBUILD triggers by invoking the parser + for _, step := range onBuildTriggers { + ast, err := parser.Parse(strings.NewReader(step)) + if err != nil { + return err + } + + for i, n := range ast.Children { + switch strings.ToUpper(n.Value) { + case "ONBUILD": + return fmt.Errorf("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed") + case "MAINTAINER", "FROM": + return fmt.Errorf("%s isn't allowed as an ONBUILD trigger", n.Value) + } + + if err := b.dispatch(i, n); err != nil { + return err + } + } + } + + return nil +} + +// probeCache checks if `b.docker` implements builder.ImageCache and image-caching +// is enabled (`b.UseCache`). +// If so attempts to look up the current `b.image` and `b.runConfig` pair with `b.docker`. +// If an image is found, probeCache returns `(true, nil)`. +// If no image is found, it returns `(false, nil)`. +// If there is any error, it returns `(false, err)`. +func (b *Builder) probeCache() (bool, error) { + c, ok := b.docker.(builder.ImageCache) + if !ok || b.options.NoCache || b.cacheBusted { + return false, nil + } + cache, err := c.GetCachedImageOnBuild(b.image, b.runConfig) + if err != nil { + return false, err + } + if len(cache) == 0 { + logrus.Debugf("[BUILDER] Cache miss: %s", b.runConfig.Cmd) + b.cacheBusted = true + return false, nil + } + + fmt.Fprintf(b.Stdout, " ---> Using cache\n") + logrus.Debugf("[BUILDER] Use cached version: %s", b.runConfig.Cmd) + b.image = string(cache) + + return true, nil +} + +func (b *Builder) create() (string, error) { + if b.image == "" && !b.noBaseImage { + return "", fmt.Errorf("Please provide a source image with `from` prior to run") + } + b.runConfig.Image = b.image + + resources := container.Resources{ + CgroupParent: b.options.CgroupParent, + CPUShares: b.options.CPUShares, + CPUPeriod: b.options.CPUPeriod, + CPUQuota: b.options.CPUQuota, + CpusetCpus: b.options.CPUSetCPUs, + CpusetMems: b.options.CPUSetMems, + Memory: b.options.Memory, + MemorySwap: b.options.MemorySwap, + Ulimits: b.options.Ulimits, + } + + // TODO: why not embed a hostconfig in builder? 
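For orientation on the ADD/COPY cache-key construction shown earlier in this hunk: a single source file contributes its own hash, while multiple sources are joined and re-hashed under a "multi:" prefix, so the key is both stable and order-sensitive. Below is a minimal, self-contained sketch of that idea; the helper name srcHash and its inputs are illustrative, not the vendored API.

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"strings"
)

// srcHash mirrors the single-vs-multi source behavior: one hash is used
// as-is, several are joined with "," and re-hashed under "multi:".
func srcHash(hashes []string) string {
	if len(hashes) == 1 {
		return hashes[0]
	}
	h := sha256.New()
	h.Write([]byte(strings.Join(hashes, ",")))
	return "multi:" + hex.EncodeToString(h.Sum(nil))
}

func main() {
	fmt.Println(srcHash([]string{"file:aaa"}))
	fmt.Println(srcHash([]string{"file:aaa", "file:bbb"}))
}

Joining before hashing means reordering the sources changes the key, which matches how a COPY with multiple sources should bust the cache when its inputs change.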
+ hostConfig := &container.HostConfig{ + Isolation: b.options.Isolation, + ShmSize: b.options.ShmSize, + Resources: resources, + } + + config := *b.runConfig + + // Create the container + c, err := b.docker.ContainerCreate(types.ContainerCreateConfig{ + Config: b.runConfig, + HostConfig: hostConfig, + }) + if err != nil { + return "", err + } + for _, warning := range c.Warnings { + fmt.Fprintf(b.Stdout, " ---> [Warning] %s\n", warning) + } + + b.tmpContainers[c.ID] = struct{}{} + fmt.Fprintf(b.Stdout, " ---> Running in %s\n", stringid.TruncateID(c.ID)) + + // override the entry point that may have been picked up from the base image + if err := b.docker.ContainerUpdateCmdOnBuild(c.ID, config.Cmd); err != nil { + return "", err + } + + return c.ID, nil +} + +func (b *Builder) run(cID string) (err error) { + errCh := make(chan error) + go func() { + errCh <- b.docker.ContainerAttachRaw(cID, nil, b.Stdout, b.Stderr, true) + }() + + finished := make(chan struct{}) + defer close(finished) + go func() { + select { + case <-b.cancelled: + logrus.Debugln("Build cancelled, killing and removing container:", cID) + b.docker.ContainerKill(cID, 0) + b.removeContainer(cID) + case <-finished: + } + }() + + if err := b.docker.ContainerStart(cID, nil); err != nil { + return err + } + + // Block on reading output from container, stop on err or chan closed + if err := <-errCh; err != nil { + return err + } + + if ret, _ := b.docker.ContainerWait(cID, -1); ret != 0 { + // TODO: change error type, because jsonmessage.JSONError assumes HTTP + return &jsonmessage.JSONError{ + Message: fmt.Sprintf("The command '%s' returned a non-zero code: %d", strings.Join(b.runConfig.Cmd, " "), ret), + Code: ret, + } + } + + return nil +} + +func (b *Builder) removeContainer(c string) error { + rmConfig := &types.ContainerRmConfig{ + ForceRemove: true, + RemoveVolume: true, + } + if err := b.docker.ContainerRm(c, rmConfig); err != nil { + fmt.Fprintf(b.Stdout, "Error removing intermediate container %s: %v\n", stringid.TruncateID(c), err) + return err + } + return nil +} + +func (b *Builder) clearTmp() { + for c := range b.tmpContainers { + if err := b.removeContainer(c); err != nil { + return + } + delete(b.tmpContainers, c) + fmt.Fprintf(b.Stdout, "Removing intermediate container %s\n", stringid.TruncateID(c)) + } +} + +// readDockerfile reads a Dockerfile from the current context. +func (b *Builder) readDockerfile() error { + // If no -f was specified then look for 'Dockerfile'. If we can't find + // that then look for 'dockerfile'. If neither are found then default + // back to 'Dockerfile' and use that in the error message. 
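The run() method above uses a cancel-or-finish goroutine so the container is killed only if the build is cancelled before the step completes. Here is a compact sketch of that pattern under assumed names (runStep, work, kill are illustrative):

package main

import "fmt"

// runStep runs work() while a watchdog goroutine waits on either the
// cancel channel (kill the container) or the finished channel (no-op).
func runStep(cancelled <-chan struct{}, work func() error, kill func()) error {
	finished := make(chan struct{})
	defer close(finished)
	go func() {
		select {
		case <-cancelled:
			kill() // build cancelled: tear the container down
		case <-finished:
			// step completed normally; nothing to do
		}
	}()
	return work()
}

func main() {
	err := runStep(make(chan struct{}), func() error { return nil }, func() {})
	fmt.Println("step finished:", err)
}

Closing finished via defer guarantees the watchdog goroutine always exits, whichever branch wins.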
+ if b.options.Dockerfile == "" { + b.options.Dockerfile = builder.DefaultDockerfileName + if _, _, err := b.context.Stat(b.options.Dockerfile); os.IsNotExist(err) { + lowercase := strings.ToLower(b.options.Dockerfile) + if _, _, err := b.context.Stat(lowercase); err == nil { + b.options.Dockerfile = lowercase + } + } + } + + f, err := b.context.Open(b.options.Dockerfile) + if err != nil { + if os.IsNotExist(err) { + return fmt.Errorf("Cannot locate specified Dockerfile: %s", b.options.Dockerfile) + } + return err + } + if f, ok := f.(*os.File); ok { + // ignoring error because Open already succeeded + fi, err := f.Stat() + if err != nil { + return fmt.Errorf("Unexpected error reading Dockerfile: %v", err) + } + if fi.Size() == 0 { + return fmt.Errorf("The Dockerfile (%s) cannot be empty", b.options.Dockerfile) + } + } + b.dockerfile, err = parser.Parse(f) + f.Close() + if err != nil { + return err + } + + // After the Dockerfile has been parsed, we need to check the .dockerignore + // file for either "Dockerfile" or ".dockerignore", and if either are + // present then erase them from the build context. These files should never + // have been sent from the client but we did send them to make sure that + // we had the Dockerfile to actually parse, and then we also need the + // .dockerignore file to know whether either file should be removed. + // Note that this assumes the Dockerfile has been read into memory and + // is now safe to be removed. + if dockerIgnore, ok := b.context.(builder.DockerIgnoreContext); ok { + dockerIgnore.Process([]string{b.options.Dockerfile}) + } + return nil +} + +// determine if build arg is part of built-in args or user +// defined args in Dockerfile at any point in time. +func (b *Builder) isBuildArgAllowed(arg string) bool { + if _, ok := BuiltinAllowedBuildArgs[arg]; ok { + return true + } + if _, ok := b.allowedBuildArgs[arg]; ok { + return true + } + return false +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/line_parsers.go b/vendor/github.com/docker/docker/builder/dockerfile/parser/line_parsers.go new file mode 100644 index 00000000..1d7ece43 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/line_parsers.go @@ -0,0 +1,331 @@ +package parser + +// line parsers are dispatch calls that parse a single unit of text into a +// Node object which contains the whole statement. Dockerfiles have varied +// (but not usually unique, see ONBUILD for a unique example) parsing rules +// per-command, and these unify the processing in a way that makes it +// manageable. + +import ( + "encoding/json" + "errors" + "fmt" + "strings" + "unicode" +) + +var ( + errDockerfileNotStringArray = errors.New("When using JSON array syntax, arrays must be comprised of strings only.") +) + +// ignore the current argument. This will still leave a command parsed, but +// will not incorporate the arguments into the ast. +func parseIgnore(rest string) (*Node, map[string]bool, error) { + return &Node{}, nil, nil +} + +// used for onbuild. Could potentially be used for anything that represents a +// statement with sub-statements. +// +// ONBUILD RUN foo bar -> (onbuild (run foo bar)) +// +func parseSubCommand(rest string) (*Node, map[string]bool, error) { + if rest == "" { + return nil, nil, nil + } + + _, child, err := ParseLine(rest) + if err != nil { + return nil, nil, err + } + + return &Node{Children: []*Node{child}}, nil, nil +} + +// helper to parse words (i.e space delimited or quoted strings) in a statement. 
+// The quotes are preserved as part of this function and they are stripped later +// as part of processWords(). +func parseWords(rest string) []string { + const ( + inSpaces = iota // looking for start of a word + inWord + inQuote + ) + + words := []string{} + phase := inSpaces + word := "" + quote := '\000' + blankOK := false + var ch rune + + for pos := 0; pos <= len(rest); pos++ { + if pos != len(rest) { + ch = rune(rest[pos]) + } + + if phase == inSpaces { // Looking for start of word + if pos == len(rest) { // end of input + break + } + if unicode.IsSpace(ch) { // skip spaces + continue + } + phase = inWord // found it, fall through + } + if (phase == inWord || phase == inQuote) && (pos == len(rest)) { + if blankOK || len(word) > 0 { + words = append(words, word) + } + break + } + if phase == inWord { + if unicode.IsSpace(ch) { + phase = inSpaces + if blankOK || len(word) > 0 { + words = append(words, word) + } + word = "" + blankOK = false + continue + } + if ch == '\'' || ch == '"' { + quote = ch + blankOK = true + phase = inQuote + } + if ch == '\\' { + if pos+1 == len(rest) { + continue // just skip \ at end + } + // If we're not quoted and we see a \, then always just + // add \ plus the char to the word, even if the char + // is a quote. + word += string(ch) + pos++ + ch = rune(rest[pos]) + } + word += string(ch) + continue + } + if phase == inQuote { + if ch == quote { + phase = inWord + } + // \ is special except for ' quotes - can't escape anything for ' + if ch == '\\' && quote != '\'' { + if pos+1 == len(rest) { + phase = inWord + continue // just skip \ at end + } + pos++ + nextCh := rune(rest[pos]) + word += string(ch) + ch = nextCh + } + word += string(ch) + } + } + + return words +} + +// parse environment like statements. Note that this does *not* handle +// variable interpolation, which will be handled in the evaluator. +func parseNameVal(rest string, key string) (*Node, map[string]bool, error) { + // This is kind of tricky because we need to support the old + // variant: KEY name value + // as well as the new one: KEY name=value ... + // The trigger to know which one is being used will be whether we hit + // a space or = first. space ==> old, "=" ==> new + + words := parseWords(rest) + if len(words) == 0 { + return nil, nil, nil + } + + var rootnode *Node + + // Old format (KEY name value) + if !strings.Contains(words[0], "=") { + node := &Node{} + rootnode = node + strs := tokenWhitespace.Split(rest, 2) + + if len(strs) < 2 { + return nil, nil, fmt.Errorf(key + " must have two arguments") + } + + node.Value = strs[0] + node.Next = &Node{} + node.Next.Value = strs[1] + } else { + var prevNode *Node + for i, word := range words { + if !strings.Contains(word, "=") { + return nil, nil, fmt.Errorf("Syntax error - can't find = in %q. Must be of the form: name=value", word) + } + parts := strings.SplitN(word, "=", 2) + + name := &Node{} + value := &Node{} + + name.Next = value + name.Value = parts[0] + value.Value = parts[1] + + if i == 0 { + rootnode = name + } else { + prevNode.Next = name + } + prevNode = value + } + } + + return rootnode, nil, nil +} + +func parseEnv(rest string) (*Node, map[string]bool, error) { + return parseNameVal(rest, "ENV") +} + +func parseLabel(rest string) (*Node, map[string]bool, error) { + return parseNameVal(rest, "LABEL") +} + +// parses a statement containing one or more keyword definition(s) and/or +// value assignments, like `name1 name2= name3="" name4=value`. 
+// Note that this is a stricter format than the old format of assignment, +// allowed by parseNameVal(), in a way that this only allows assignment of the +// form `keyword=[]` like `name2=`, `name3=""`, and `name4=value` above. +// In addition, a keyword definition alone is of the form `keyword` like `name1` +// above. And the assignments `name2=` and `name3=""` are equivalent and +// assign an empty value to the respective keywords. +func parseNameOrNameVal(rest string) (*Node, map[string]bool, error) { + words := parseWords(rest) + if len(words) == 0 { + return nil, nil, nil + } + + var ( + rootnode *Node + prevNode *Node + ) + for i, word := range words { + node := &Node{} + node.Value = word + if i == 0 { + rootnode = node + } else { + prevNode.Next = node + } + prevNode = node + } + + return rootnode, nil, nil +} + +// parses a whitespace-delimited set of arguments. The result is effectively a +// linked list of string arguments. +func parseStringsWhitespaceDelimited(rest string) (*Node, map[string]bool, error) { + if rest == "" { + return nil, nil, nil + } + + node := &Node{} + rootnode := node + prevnode := node + for _, str := range tokenWhitespace.Split(rest, -1) { // use regexp + prevnode = node + node.Value = str + node.Next = &Node{} + node = node.Next + } + + // XXX to get around regexp.Split *always* providing an empty string at the + // end due to how our loop is constructed, nil out the last node in the + // chain. + prevnode.Next = nil + + return rootnode, nil, nil +} + +// parsestring just wraps the string in quotes and returns a working node. +func parseString(rest string) (*Node, map[string]bool, error) { + if rest == "" { + return nil, nil, nil + } + n := &Node{} + n.Value = rest + return n, nil, nil +} + +// parseJSON converts JSON arrays to an AST. +func parseJSON(rest string) (*Node, map[string]bool, error) { + rest = strings.TrimLeftFunc(rest, unicode.IsSpace) + if !strings.HasPrefix(rest, "[") { + return nil, nil, fmt.Errorf(`Error parsing "%s" as a JSON array`, rest) + } + + var myJSON []interface{} + if err := json.NewDecoder(strings.NewReader(rest)).Decode(&myJSON); err != nil { + return nil, nil, err + } + + var top, prev *Node + for _, str := range myJSON { + s, ok := str.(string) + if !ok { + return nil, nil, errDockerfileNotStringArray + } + + node := &Node{Value: s} + if prev == nil { + top = node + } else { + prev.Next = node + } + prev = node + } + + return top, map[string]bool{"json": true}, nil +} + +// parseMaybeJSON determines if the argument appears to be a JSON array. If +// so, passes to parseJSON; if not, quotes the result and returns a single +// node. +func parseMaybeJSON(rest string) (*Node, map[string]bool, error) { + if rest == "" { + return nil, nil, nil + } + + node, attrs, err := parseJSON(rest) + + if err == nil { + return node, attrs, nil + } + if err == errDockerfileNotStringArray { + return nil, nil, err + } + + node = &Node{} + node.Value = rest + return node, nil, nil +} + +// parseMaybeJSONToList determines if the argument appears to be a JSON array. If +// so, passes to parseJSON; if not, attempts to parse it as a whitespace +// delimited string. 
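The JSON-or-plain decision these parsers make can be seen in a few lines. A hedged sketch, assuming a simplified parseMaybe helper: it attempts to decode the rest of the line as a JSON string array (exec form) and otherwise treats it as a single shell-form string. Unlike the vendored parseJSON, which reports errDockerfileNotStringArray for arrays containing non-strings, this sketch simply falls back.

package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

// parseMaybe returns the exec-form words and true if rest is a JSON
// string array, or the whole line as one shell-form word and false.
func parseMaybe(rest string) ([]string, bool) {
	trimmed := strings.TrimSpace(rest)
	if strings.HasPrefix(trimmed, "[") {
		var arr []string
		if err := json.Unmarshal([]byte(trimmed), &arr); err == nil {
			return arr, true // exec form
		}
	}
	return []string{rest}, false // shell form
}

func main() {
	fmt.Println(parseMaybe(`["/bin/sh", "-c", "echo hi"]`))
	fmt.Println(parseMaybe("echo hi"))
}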
+func parseMaybeJSONToList(rest string) (*Node, map[string]bool, error) { + node, attrs, err := parseJSON(rest) + + if err == nil { + return node, attrs, nil + } + if err == errDockerfileNotStringArray { + return nil, nil, err + } + + return parseStringsWhitespaceDelimited(rest) +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/parser.go b/vendor/github.com/docker/docker/builder/dockerfile/parser/parser.go new file mode 100644 index 00000000..ece601a9 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/parser.go @@ -0,0 +1,161 @@ +// Package parser implements a parser and parse tree dumper for Dockerfiles. +package parser + +import ( + "bufio" + "io" + "regexp" + "strings" + "unicode" + + "github.com/docker/docker/builder/dockerfile/command" +) + +// Node is a structure used to represent a parse tree. +// +// In the node there are three fields, Value, Next, and Children. Value is the +// current token's string value. Next is always the next non-child token, and +// children contains all the children. Here's an example: +// +// (value next (child child-next child-next-next) next-next) +// +// This data structure is frankly pretty lousy for handling complex languages, +// but lucky for us the Dockerfile isn't very complicated. This structure +// works a little more effectively than a "proper" parse tree for our needs. +// +type Node struct { + Value string // actual content + Next *Node // the next item in the current sexp + Children []*Node // the children of this sexp + Attributes map[string]bool // special attributes for this node + Original string // original line used before parsing + Flags []string // only top Node should have this set + StartLine int // the line in the original dockerfile where the node begins + EndLine int // the line in the original dockerfile where the node ends +} + +var ( + dispatch map[string]func(string) (*Node, map[string]bool, error) + tokenWhitespace = regexp.MustCompile(`[\t\v\f\r ]+`) + tokenLineContinuation = regexp.MustCompile(`\\[ \t]*$`) + tokenComment = regexp.MustCompile(`^#.*$`) +) + +func init() { + // Dispatch Table. see line_parsers.go for the parse functions. + // The command is parsed and mapped to the line parser. The line parser + // receives the arguments but not the command, and returns an AST after + // reformulating the arguments according to the rules in the parser + // functions. Errors are propagated up by Parse() and the resulting AST can + // be incorporated directly into the existing AST as a next. + dispatch = map[string]func(string) (*Node, map[string]bool, error){ + command.User: parseString, + command.Onbuild: parseSubCommand, + command.Workdir: parseString, + command.Env: parseEnv, + command.Label: parseLabel, + command.Maintainer: parseString, + command.From: parseString, + command.Add: parseMaybeJSONToList, + command.Copy: parseMaybeJSONToList, + command.Run: parseMaybeJSON, + command.Cmd: parseMaybeJSON, + command.Entrypoint: parseMaybeJSON, + command.Expose: parseStringsWhitespaceDelimited, + command.Volume: parseMaybeJSONToList, + command.StopSignal: parseString, + command.Arg: parseNameOrNameVal, + } +} + +// ParseLine parse a line and return the remainder. 
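To make the Value/Next/Children shape of the parse tree concrete, here is a toy reconstruction (type and function names are stand-ins, not the vendored ones) showing how an instruction like "ENV a b" becomes a chain of nodes hung off a root's Children:

package main

import "fmt"

type node struct {
	Value    string  // current token
	Next     *node   // next sibling token in the statement
	Children []*node // sub-statements, e.g. the body of ONBUILD
}

// dump walks the Next chain and prints the statement as one sexp body.
func dump(n *node) string {
	s := n.Value
	for c := n.Next; c != nil; c = c.Next {
		s += " " + c.Value
	}
	return s
}

func main() {
	env := &node{Value: "env", Next: &node{Value: "a", Next: &node{Value: "b"}}}
	root := &node{Children: []*node{env}}
	for _, c := range root.Children {
		fmt.Println("(" + dump(c) + ")")
	}
}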
+func ParseLine(line string) (string, *Node, error) { + if line = stripComments(line); line == "" { + return "", nil, nil + } + + if tokenLineContinuation.MatchString(line) { + line = tokenLineContinuation.ReplaceAllString(line, "") + return line, nil, nil + } + + cmd, flags, args, err := splitCommand(line) + if err != nil { + return "", nil, err + } + + node := &Node{} + node.Value = cmd + + sexp, attrs, err := fullDispatch(cmd, args) + if err != nil { + return "", nil, err + } + + node.Next = sexp + node.Attributes = attrs + node.Original = line + node.Flags = flags + + return "", node, nil +} + +// Parse is the main parse routine. +// It handles an io.ReadWriteCloser and returns the root of the AST. +func Parse(rwc io.Reader) (*Node, error) { + currentLine := 0 + root := &Node{} + root.StartLine = -1 + scanner := bufio.NewScanner(rwc) + + for scanner.Scan() { + scannedLine := strings.TrimLeftFunc(scanner.Text(), unicode.IsSpace) + currentLine++ + line, child, err := ParseLine(scannedLine) + if err != nil { + return nil, err + } + startLine := currentLine + + if line != "" && child == nil { + for scanner.Scan() { + newline := scanner.Text() + currentLine++ + + if stripComments(strings.TrimSpace(newline)) == "" { + continue + } + + line, child, err = ParseLine(line + newline) + if err != nil { + return nil, err + } + + if child != nil { + break + } + } + if child == nil && line != "" { + _, child, err = ParseLine(line) + if err != nil { + return nil, err + } + } + } + + if child != nil { + // Update the line information for the current child. + child.StartLine = startLine + child.EndLine = currentLine + // Update the line information for the root. The starting line of the root is always the + // starting line of the first child and the ending line is the ending line of the last child. + if root.StartLine < 0 { + root.StartLine = currentLine + } + root.EndLine = currentLine + root.Children = append(root.Children, child) + } + } + + return root, nil +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/utils.go b/vendor/github.com/docker/docker/builder/dockerfile/parser/utils.go new file mode 100644 index 00000000..b21eb62a --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/utils.go @@ -0,0 +1,176 @@ +package parser + +import ( + "fmt" + "strconv" + "strings" + "unicode" +) + +// Dump dumps the AST defined by `node` as a list of sexps. +// Returns a string suitable for printing. +func (node *Node) Dump() string { + str := "" + str += node.Value + + if len(node.Flags) > 0 { + str += fmt.Sprintf(" %q", node.Flags) + } + + for _, n := range node.Children { + str += "(" + n.Dump() + ")\n" + } + + if node.Next != nil { + for n := node.Next; n != nil; n = n.Next { + if len(n.Children) > 0 { + str += " " + n.Dump() + } else { + str += " " + strconv.Quote(n.Value) + } + } + } + + return strings.TrimSpace(str) +} + +// performs the dispatch based on the two primal strings, cmd and args. Please +// look at the dispatch table in parser.go to see how these dispatchers work. +func fullDispatch(cmd, args string) (*Node, map[string]bool, error) { + fn := dispatch[cmd] + + // Ignore invalid Dockerfile instructions + if fn == nil { + fn = parseIgnore + } + + sexp, attrs, err := fn(args) + if err != nil { + return nil, nil, err + } + + return sexp, attrs, nil +} + +// splitCommand takes a single line of text and parses out the cmd and args, +// which are used for dispatching to more exact parsing functions. 
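The continuation handling in Parse() above is easy to miss in the scanner loop: lines ending in a backslash are accumulated until a complete logical line is formed. A minimal sketch, using the same `\\[ \t]*$` pattern as tokenLineContinuation (logicalLines is an illustrative helper, not the vendored API):

package main

import (
	"bufio"
	"fmt"
	"regexp"
	"strings"
)

var contRe = regexp.MustCompile(`\\[ \t]*$`)

// logicalLines joins physical lines that end with a backslash
// continuation into single logical lines.
func logicalLines(input string) []string {
	var out []string
	cur := ""
	sc := bufio.NewScanner(strings.NewReader(input))
	for sc.Scan() {
		line := sc.Text()
		if contRe.MatchString(line) {
			cur += contRe.ReplaceAllString(line, "")
			continue
		}
		out = append(out, cur+line)
		cur = ""
	}
	if cur != "" {
		out = append(out, cur)
	}
	return out
}

func main() {
	src := "RUN apt-get update && \\\n    apt-get install -y curl\nCMD [\"sh\"]"
	fmt.Printf("%q\n", logicalLines(src))
}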
+func splitCommand(line string) (string, []string, string, error) { + var args string + var flags []string + + // Make sure we get the same results irrespective of leading/trailing spaces + cmdline := tokenWhitespace.Split(strings.TrimSpace(line), 2) + cmd := strings.ToLower(cmdline[0]) + + if len(cmdline) == 2 { + var err error + args, flags, err = extractBuilderFlags(cmdline[1]) + if err != nil { + return "", nil, "", err + } + } + + return cmd, flags, strings.TrimSpace(args), nil +} + +// covers comments and empty lines. Lines should be trimmed before passing to +// this function. +func stripComments(line string) string { + // string is already trimmed at this point + if tokenComment.MatchString(line) { + return tokenComment.ReplaceAllString(line, "") + } + + return line +} + +func extractBuilderFlags(line string) (string, []string, error) { + // Parses the BuilderFlags and returns the remaining part of the line + + const ( + inSpaces = iota // looking for start of a word + inWord + inQuote + ) + + words := []string{} + phase := inSpaces + word := "" + quote := '\000' + blankOK := false + var ch rune + + for pos := 0; pos <= len(line); pos++ { + if pos != len(line) { + ch = rune(line[pos]) + } + + if phase == inSpaces { // Looking for start of word + if pos == len(line) { // end of input + break + } + if unicode.IsSpace(ch) { // skip spaces + continue + } + + // Only keep going if the next word starts with -- + if ch != '-' || pos+1 == len(line) || rune(line[pos+1]) != '-' { + return line[pos:], words, nil + } + + phase = inWord // found someting with "--", fall through + } + if (phase == inWord || phase == inQuote) && (pos == len(line)) { + if word != "--" && (blankOK || len(word) > 0) { + words = append(words, word) + } + break + } + if phase == inWord { + if unicode.IsSpace(ch) { + phase = inSpaces + if word == "--" { + return line[pos:], words, nil + } + if blankOK || len(word) > 0 { + words = append(words, word) + } + word = "" + blankOK = false + continue + } + if ch == '\'' || ch == '"' { + quote = ch + blankOK = true + phase = inQuote + continue + } + if ch == '\\' { + if pos+1 == len(line) { + continue // just skip \ at end + } + pos++ + ch = rune(line[pos]) + } + word += string(ch) + continue + } + if phase == inQuote { + if ch == quote { + phase = inWord + continue + } + if ch == '\\' { + if pos+1 == len(line) { + phase = inWord + continue // just skip \ at end + } + pos++ + ch = rune(line[pos]) + } + word += string(ch) + } + } + + return "", words, nil +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/shell_parser.go b/vendor/github.com/docker/docker/builder/dockerfile/shell_parser.go new file mode 100644 index 00000000..c7142667 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/shell_parser.go @@ -0,0 +1,314 @@ +package dockerfile + +// This will take a single word and an array of env variables and +// process all quotes (" and ') as well as $xxx and ${xxx} env variable +// tokens. Tries to mimic bash shell process. +// It doesn't support all flavors of ${xx:...} formats but new ones can +// be added by adding code to the "special ${} format processing" section + +import ( + "fmt" + "strings" + "text/scanner" + "unicode" +) + +type shellWord struct { + word string + scanner scanner.Scanner + envs []string + pos int +} + +// ProcessWord will use the 'env' list of environment variables, +// and replace any env var references in 'word'. 
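The quote-aware scans above (parseWords, extractBuilderFlags) share one core idea: spaces split words unless inside ' or " quotes, and \ escapes the next character outside single quotes. The sketch below is a simplified re-creation under an assumed name; unlike parseWords it strips the quotes, and it lets \ escape anything inside double quotes, where the vendored code only escapes " and $.

package main

import "fmt"

// splitWords tokenizes s on whitespace, honoring quoting and escaping.
func splitWords(s string) []string {
	var words []string
	var cur []rune
	var quote rune
	escaped := false
	for _, ch := range s {
		switch {
		case escaped:
			cur = append(cur, ch)
			escaped = false
		case ch == '\\' && quote != '\'':
			escaped = true // \ is literal only inside single quotes
		case quote != 0 && ch == quote:
			quote = 0 // closing quote
		case quote == 0 && (ch == '\'' || ch == '"'):
			quote = ch // opening quote
		case quote == 0 && (ch == ' ' || ch == '\t'):
			if len(cur) > 0 {
				words = append(words, string(cur))
				cur = nil
			}
		default:
			cur = append(cur, ch)
		}
	}
	if len(cur) > 0 {
		words = append(words, string(cur))
	}
	return words
}

func main() {
	fmt.Printf("%q\n", splitWords(`--from=builder "a b" c\ d`))
}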
+func ProcessWord(word string, env []string) (string, error) { + sw := &shellWord{ + word: word, + envs: env, + pos: 0, + } + sw.scanner.Init(strings.NewReader(word)) + word, _, err := sw.process() + return word, err +} + +// ProcessWords will use the 'env' list of environment variables, +// and replace any env var references in 'word' then it will also +// return a slice of strings which represents the 'word' +// split up based on spaces - taking into account quotes. Note that +// this splitting is done **after** the env var substitutions are done. +// Note, each one is trimmed to remove leading and trailing spaces (unless +// they are quoted", but ProcessWord retains spaces between words. +func ProcessWords(word string, env []string) ([]string, error) { + sw := &shellWord{ + word: word, + envs: env, + pos: 0, + } + sw.scanner.Init(strings.NewReader(word)) + _, words, err := sw.process() + return words, err +} + +func (sw *shellWord) process() (string, []string, error) { + return sw.processStopOn(scanner.EOF) +} + +type wordsStruct struct { + word string + words []string + inWord bool +} + +func (w *wordsStruct) addChar(ch rune) { + if unicode.IsSpace(ch) && w.inWord { + if len(w.word) != 0 { + w.words = append(w.words, w.word) + w.word = "" + w.inWord = false + } + } else if !unicode.IsSpace(ch) { + w.addRawChar(ch) + } +} + +func (w *wordsStruct) addRawChar(ch rune) { + w.word += string(ch) + w.inWord = true +} + +func (w *wordsStruct) addString(str string) { + var scan scanner.Scanner + scan.Init(strings.NewReader(str)) + for scan.Peek() != scanner.EOF { + w.addChar(scan.Next()) + } +} + +func (w *wordsStruct) addRawString(str string) { + w.word += str + w.inWord = true +} + +func (w *wordsStruct) getWords() []string { + if len(w.word) > 0 { + w.words = append(w.words, w.word) + + // Just in case we're called again by mistake + w.word = "" + w.inWord = false + } + return w.words +} + +// Process the word, starting at 'pos', and stop when we get to the +// end of the word or the 'stopChar' character +func (sw *shellWord) processStopOn(stopChar rune) (string, []string, error) { + var result string + var words wordsStruct + + var charFuncMapping = map[rune]func() (string, error){ + '\'': sw.processSingleQuote, + '"': sw.processDoubleQuote, + '$': sw.processDollar, + } + + for sw.scanner.Peek() != scanner.EOF { + ch := sw.scanner.Peek() + + if stopChar != scanner.EOF && ch == stopChar { + sw.scanner.Next() + break + } + if fn, ok := charFuncMapping[ch]; ok { + // Call special processing func for certain chars + tmp, err := fn() + if err != nil { + return "", []string{}, err + } + result += tmp + + if ch == rune('$') { + words.addString(tmp) + } else { + words.addRawString(tmp) + } + } else { + // Not special, just add it to the result + ch = sw.scanner.Next() + + if ch == '\\' { + // '\' escapes, except end of line + + ch = sw.scanner.Next() + + if ch == scanner.EOF { + break + } + + words.addRawChar(ch) + } else { + words.addChar(ch) + } + + result += string(ch) + } + } + + return result, words.getWords(), nil +} + +func (sw *shellWord) processSingleQuote() (string, error) { + // All chars between single quotes are taken as-is + // Note, you can't escape ' + var result string + + sw.scanner.Next() + + for { + ch := sw.scanner.Next() + if ch == '\'' || ch == scanner.EOF { + break + } + result += string(ch) + } + + return result, nil +} + +func (sw *shellWord) processDoubleQuote() (string, error) { + // All chars up to the next " are taken as-is, even ', except any $ chars + // But you can 
escape " with a \ + var result string + + sw.scanner.Next() + + for sw.scanner.Peek() != scanner.EOF { + ch := sw.scanner.Peek() + if ch == '"' { + sw.scanner.Next() + break + } + if ch == '$' { + tmp, err := sw.processDollar() + if err != nil { + return "", err + } + result += tmp + } else { + ch = sw.scanner.Next() + if ch == '\\' { + chNext := sw.scanner.Peek() + + if chNext == scanner.EOF { + // Ignore \ at end of word + continue + } + + if chNext == '"' || chNext == '$' { + // \" and \$ can be escaped, all other \'s are left as-is + ch = sw.scanner.Next() + } + } + result += string(ch) + } + } + + return result, nil +} + +func (sw *shellWord) processDollar() (string, error) { + sw.scanner.Next() + ch := sw.scanner.Peek() + if ch == '{' { + sw.scanner.Next() + name := sw.processName() + ch = sw.scanner.Peek() + if ch == '}' { + // Normal ${xx} case + sw.scanner.Next() + return sw.getEnv(name), nil + } + if ch == ':' { + // Special ${xx:...} format processing + // Yes it allows for recursive $'s in the ... spot + + sw.scanner.Next() // skip over : + modifier := sw.scanner.Next() + + word, _, err := sw.processStopOn('}') + if err != nil { + return "", err + } + + // Grab the current value of the variable in question so we + // can use to to determine what to do based on the modifier + newValue := sw.getEnv(name) + + switch modifier { + case '+': + if newValue != "" { + newValue = word + } + return newValue, nil + + case '-': + if newValue == "" { + newValue = word + } + return newValue, nil + + default: + return "", fmt.Errorf("Unsupported modifier (%c) in substitution: %s", modifier, sw.word) + } + } + return "", fmt.Errorf("Missing ':' in substitution: %s", sw.word) + } + // $xxx case + name := sw.processName() + if name == "" { + return "$", nil + } + return sw.getEnv(name), nil +} + +func (sw *shellWord) processName() string { + // Read in a name (alphanumeric or _) + // If it starts with a numeric then just return $# + var name string + + for sw.scanner.Peek() != scanner.EOF { + ch := sw.scanner.Peek() + if len(name) == 0 && unicode.IsDigit(ch) { + ch = sw.scanner.Next() + return string(ch) + } + if !unicode.IsLetter(ch) && !unicode.IsDigit(ch) && ch != '_' { + break + } + ch = sw.scanner.Next() + name += string(ch) + } + + return name +} + +func (sw *shellWord) getEnv(name string) string { + for _, env := range sw.envs { + i := strings.Index(env, "=") + if i < 0 { + if name == env { + // Should probably never get here, but just in case treat + // it like "var" and "var=" are the same + return "" + } + continue + } + if name != env[:i] { + continue + } + return env[i+1:] + } + return "" +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/support.go b/vendor/github.com/docker/docker/builder/dockerfile/support.go new file mode 100644 index 00000000..38897b2c --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/support.go @@ -0,0 +1,16 @@ +package dockerfile + +import "strings" + +func handleJSONArgs(args []string, attributes map[string]bool) []string { + if len(args) == 0 { + return []string{} + } + + if attributes != nil && attributes["json"] { + return args + } + + // literal string command, not an exec array + return []string{strings.Join(args, " ")} +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/wordsTest b/vendor/github.com/docker/docker/builder/dockerfile/wordsTest new file mode 100644 index 00000000..fa916c67 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/wordsTest @@ -0,0 +1,25 @@ +hello | hello 
+hello${hi}bye | hellobye +ENV hi=hi +hello${hi}bye | hellohibye +ENV space=abc def +hello${space}bye | helloabc,defbye +hello"${space}"bye | helloabc defbye +hello "${space}"bye | hello,abc defbye +ENV leading= ab c +hello${leading}def | hello,ab,cdef +hello"${leading}" def | hello ab c,def +hello"${leading}" | hello ab c +hello${leading} | hello,ab,c +# next line MUST have 3 trailing spaces, don't erase them! +ENV trailing=ab c +hello${trailing} | helloab,c +hello${trailing}d | helloab,c,d +hello"${trailing}"d | helloab c d +# next line MUST have 3 trailing spaces, don't erase them! +hel"lo${trailing}" | helloab c +hello" there " | hello there +hello there | hello,there +hello\ there | hello there +hello" there | hello there +hello\" there | hello",there diff --git a/vendor/github.com/docker/docker/cli/cli.go b/vendor/github.com/docker/docker/cli/cli.go new file mode 100644 index 00000000..8e559fc3 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/cli.go @@ -0,0 +1,200 @@ +package cli + +import ( + "errors" + "fmt" + "io" + "os" + "reflect" + "strings" + + flag "github.com/docker/docker/pkg/mflag" +) + +// Cli represents a command line interface. +type Cli struct { + Stderr io.Writer + handlers []Handler + Usage func() +} + +// Handler holds the different commands Cli will call +// It should have methods with names starting with `Cmd` like: +// func (h myHandler) CmdFoo(args ...string) error +type Handler interface{} + +// Initializer can be optionally implemented by a Handler to +// initialize before each call to one of its commands. +type Initializer interface { + Initialize() error +} + +// New instantiates a ready-to-use Cli. +func New(handlers ...Handler) *Cli { + // make the generic Cli object the first cli handler + // in order to handle `docker help` appropriately + cli := new(Cli) + cli.handlers = append([]Handler{cli}, handlers...) + return cli +} + +// initErr is an error returned upon initialization of a handler implementing Initializer. +type initErr struct{ error } + +func (err initErr) Error() string { + return err.Error() +} + +func (cli *Cli) command(args ...string) (func(...string) error, error) { + for _, c := range cli.handlers { + if c == nil { + continue + } + camelArgs := make([]string, len(args)) + for i, s := range args { + if len(s) == 0 { + return nil, errors.New("empty command") + } + camelArgs[i] = strings.ToUpper(s[:1]) + strings.ToLower(s[1:]) + } + methodName := "Cmd" + strings.Join(camelArgs, "") + method := reflect.ValueOf(c).MethodByName(methodName) + if method.IsValid() { + if c, ok := c.(Initializer); ok { + if err := c.Initialize(); err != nil { + return nil, initErr{err} + } + } + return method.Interface().(func(...string) error), nil + } + } + return nil, errors.New("command not found") +} + +// Run executes the specified command. +func (cli *Cli) Run(args ...string) error { + if len(args) > 1 { + command, err := cli.command(args[:2]...) + switch err := err.(type) { + case nil: + return command(args[2:]...) + case initErr: + return err.error + } + } + if len(args) > 0 { + command, err := cli.command(args[0]) + switch err := err.(type) { + case nil: + return command(args[1:]...) 
+ case initErr: + return err.error + } + cli.noSuchCommand(args[0]) + } + return cli.CmdHelp() +} + +func (cli *Cli) noSuchCommand(command string) { + if cli.Stderr == nil { + cli.Stderr = os.Stderr + } + fmt.Fprintf(cli.Stderr, "docker: '%s' is not a docker command.\nSee 'docker --help'.\n", command) + os.Exit(1) +} + +// CmdHelp displays information on a Docker command. +// +// If more than one command is specified, information is only shown for the first command. +// +// Usage: docker help COMMAND or docker COMMAND --help +func (cli *Cli) CmdHelp(args ...string) error { + if len(args) > 1 { + command, err := cli.command(args[:2]...) + switch err := err.(type) { + case nil: + command("--help") + return nil + case initErr: + return err.error + } + } + if len(args) > 0 { + command, err := cli.command(args[0]) + switch err := err.(type) { + case nil: + command("--help") + return nil + case initErr: + return err.error + } + cli.noSuchCommand(args[0]) + } + + if cli.Usage == nil { + flag.Usage() + } else { + cli.Usage() + } + + return nil +} + +// Subcmd is a subcommand of the main "docker" command. +// A subcommand represents an action that can be performed +// from the Docker command line client. +// +// To see all available subcommands, run "docker --help". +func Subcmd(name string, synopses []string, description string, exitOnError bool) *flag.FlagSet { + var errorHandling flag.ErrorHandling + if exitOnError { + errorHandling = flag.ExitOnError + } else { + errorHandling = flag.ContinueOnError + } + flags := flag.NewFlagSet(name, errorHandling) + flags.Usage = func() { + flags.ShortUsage() + flags.PrintDefaults() + } + + flags.ShortUsage = func() { + options := "" + if flags.FlagCountUndeprecated() > 0 { + options = " [OPTIONS]" + } + + if len(synopses) == 0 { + synopses = []string{""} + } + + // Allow for multiple command usage synopses. + for i, synopsis := range synopses { + lead := "\t" + if i == 0 { + // First line needs the word 'Usage'. + lead = "Usage:\t" + } + + if synopsis != "" { + synopsis = " " + synopsis + } + + fmt.Fprintf(flags.Out(), "\n%sdocker %s%s%s", lead, name, options, synopsis) + } + + fmt.Fprintf(flags.Out(), "\n\n%s\n", description) + } + + return flags +} + +// An StatusError reports an unsuccessful exit by a command. +type StatusError struct { + Status string + StatusCode int +} + +func (e StatusError) Error() string { + return fmt.Sprintf("Status: %s, Code: %d", e.Status, e.StatusCode) +} diff --git a/vendor/github.com/docker/docker/cli/client.go b/vendor/github.com/docker/docker/cli/client.go new file mode 100644 index 00000000..6a82eb52 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/client.go @@ -0,0 +1,12 @@ +package cli + +import flag "github.com/docker/docker/pkg/mflag" + +// ClientFlags represents flags for the docker client. +type ClientFlags struct { + FlagSet *flag.FlagSet + Common *CommonFlags + PostParse func() + + ConfigDir string +} diff --git a/vendor/github.com/docker/docker/cli/common.go b/vendor/github.com/docker/docker/cli/common.go new file mode 100644 index 00000000..7f6a24ba --- /dev/null +++ b/vendor/github.com/docker/docker/cli/common.go @@ -0,0 +1,80 @@ +package cli + +import ( + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/go-connections/tlsconfig" +) + +// CommonFlags represents flags that are common to both the client and the daemon. 
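The command lookup above relies on reflection: the args are camel-cased, prefixed with "Cmd", and resolved via MethodByName. A self-contained sketch of that dispatch (handler and dispatch are illustrative names):

package main

import (
	"errors"
	"fmt"
	"reflect"
	"strings"
)

type handler struct{}

// CmdPs is discovered by name at runtime, never called directly.
func (handler) CmdPs(args ...string) error {
	fmt.Println("ps called with", args)
	return nil
}

// dispatch looks up Cmd<TitleCase> on h and invokes it with args.
func dispatch(h interface{}, name string, args ...string) error {
	camel := strings.ToUpper(name[:1]) + strings.ToLower(name[1:])
	method := reflect.ValueOf(h).MethodByName("Cmd" + camel)
	if !method.IsValid() {
		return errors.New("command not found: " + name)
	}
	return method.Interface().(func(...string) error)(args...)
}

func main() {
	if err := dispatch(handler{}, "ps", "-a"); err != nil {
		fmt.Println(err)
	}
}

The trade-off of this design is that adding a command is just adding a method, at the cost of the compiler no longer checking that every command name maps to something.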
+type CommonFlags struct { + FlagSet *flag.FlagSet + PostParse func() + + Debug bool + Hosts []string + LogLevel string + TLS bool + TLSVerify bool + TLSOptions *tlsconfig.Options + TrustKey string +} + +// Command is the struct containing the command name and description +type Command struct { + Name string + Description string +} + +var dockerCommands = []Command{ + {"attach", "Attach to a running container"}, + {"build", "Build an image from a Dockerfile"}, + {"commit", "Create a new image from a container's changes"}, + {"cp", "Copy files/folders between a container and the local filesystem"}, + {"create", "Create a new container"}, + {"diff", "Inspect changes on a container's filesystem"}, + {"events", "Get real time events from the server"}, + {"exec", "Run a command in a running container"}, + {"export", "Export a container's filesystem as a tar archive"}, + {"history", "Show the history of an image"}, + {"images", "List images"}, + {"import", "Import the contents from a tarball to create a filesystem image"}, + {"info", "Display system-wide information"}, + {"inspect", "Return low-level information on a container or image"}, + {"kill", "Kill a running container"}, + {"load", "Load an image from a tar archive or STDIN"}, + {"login", "Log in to a Docker registry"}, + {"logout", "Log out from a Docker registry"}, + {"logs", "Fetch the logs of a container"}, + {"network", "Manage Docker networks"}, + {"pause", "Pause all processes within a container"}, + {"port", "List port mappings or a specific mapping for the CONTAINER"}, + {"ps", "List containers"}, + {"pull", "Pull an image or a repository from a registry"}, + {"push", "Push an image or a repository to a registry"}, + {"rename", "Rename a container"}, + {"restart", "Restart a container"}, + {"rm", "Remove one or more containers"}, + {"rmi", "Remove one or more images"}, + {"run", "Run a command in a new container"}, + {"save", "Save one or more images to a tar archive"}, + {"search", "Search the Docker Hub for images"}, + {"start", "Start one or more stopped containers"}, + {"stats", "Display a live stream of container(s) resource usage statistics"}, + {"stop", "Stop a running container"}, + {"tag", "Tag an image into a repository"}, + {"top", "Display the running processes of a container"}, + {"unpause", "Unpause all processes within a container"}, + {"update", "Update configuration of one or more containers"}, + {"version", "Show the Docker version information"}, + {"volume", "Manage Docker volumes"}, + {"wait", "Block until a container stops, then print its exit code"}, +} + +// DockerCommands stores all the docker command +var DockerCommands = make(map[string]Command) + +func init() { + for _, cmd := range dockerCommands { + DockerCommands[cmd.Name] = cmd + } +} diff --git a/vendor/github.com/docker/docker/cliconfig/credentials/credentials.go b/vendor/github.com/docker/docker/cliconfig/credentials/credentials.go new file mode 100644 index 00000000..510cf8cf --- /dev/null +++ b/vendor/github.com/docker/docker/cliconfig/credentials/credentials.go @@ -0,0 +1,17 @@ +package credentials + +import ( + "github.com/docker/engine-api/types" +) + +// Store is the interface that any credentials store must implement. +type Store interface { + // Erase removes credentials from the store for a given server. + Erase(serverAddress string) error + // Get retrieves credentials from the store for a given server. + Get(serverAddress string) (types.AuthConfig, error) + // GetAll retrieves all the credentials from the store. 
+ GetAll() (map[string]types.AuthConfig, error) + // Store saves credentials in the store. + Store(authConfig types.AuthConfig) error +} diff --git a/vendor/github.com/docker/docker/cliconfig/credentials/default_store.go b/vendor/github.com/docker/docker/cliconfig/credentials/default_store.go new file mode 100644 index 00000000..3cdc8c38 --- /dev/null +++ b/vendor/github.com/docker/docker/cliconfig/credentials/default_store.go @@ -0,0 +1,22 @@ +package credentials + +import ( + "github.com/docker/containerd/subreaper/exec" + + "github.com/docker/docker/cliconfig" +) + +// DetectDefaultStore sets the default credentials store +// if the host includes the default store helper program. +func DetectDefaultStore(c *cliconfig.ConfigFile) { + if c.CredentialsStore != "" { + // user defined + return + } + + if defaultCredentialsStore != "" { + if _, err := exec.LookPath(remoteCredentialsPrefix + defaultCredentialsStore); err == nil { + c.CredentialsStore = defaultCredentialsStore + } + } +} diff --git a/vendor/github.com/docker/docker/cliconfig/credentials/default_store_darwin.go b/vendor/github.com/docker/docker/cliconfig/credentials/default_store_darwin.go new file mode 100644 index 00000000..63e8ed40 --- /dev/null +++ b/vendor/github.com/docker/docker/cliconfig/credentials/default_store_darwin.go @@ -0,0 +1,3 @@ +package credentials + +const defaultCredentialsStore = "osxkeychain" diff --git a/vendor/github.com/docker/docker/cliconfig/credentials/default_store_linux.go b/vendor/github.com/docker/docker/cliconfig/credentials/default_store_linux.go new file mode 100644 index 00000000..864c540f --- /dev/null +++ b/vendor/github.com/docker/docker/cliconfig/credentials/default_store_linux.go @@ -0,0 +1,3 @@ +package credentials + +const defaultCredentialsStore = "secretservice" diff --git a/vendor/github.com/docker/docker/cliconfig/credentials/default_store_unsupported.go b/vendor/github.com/docker/docker/cliconfig/credentials/default_store_unsupported.go new file mode 100644 index 00000000..519ef53d --- /dev/null +++ b/vendor/github.com/docker/docker/cliconfig/credentials/default_store_unsupported.go @@ -0,0 +1,5 @@ +// +build !windows,!darwin,!linux + +package credentials + +const defaultCredentialsStore = "" diff --git a/vendor/github.com/docker/docker/cliconfig/credentials/file_store.go b/vendor/github.com/docker/docker/cliconfig/credentials/file_store.go new file mode 100644 index 00000000..8e7edd62 --- /dev/null +++ b/vendor/github.com/docker/docker/cliconfig/credentials/file_store.go @@ -0,0 +1,67 @@ +package credentials + +import ( + "strings" + + "github.com/docker/docker/cliconfig" + "github.com/docker/engine-api/types" +) + +// fileStore implements a credentials store using +// the docker configuration file to keep the credentials in plain text. +type fileStore struct { + file *cliconfig.ConfigFile +} + +// NewFileStore creates a new file credentials store. +func NewFileStore(file *cliconfig.ConfigFile) Store { + return &fileStore{ + file: file, + } +} + +// Erase removes the given credentials from the file store. +func (c *fileStore) Erase(serverAddress string) error { + delete(c.file.AuthConfigs, serverAddress) + return c.file.Save() +} + +// Get retrieves credentials for a specific server from the file store. 
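Any type with Erase/Get/GetAll/Store methods satisfies the Store interface above. As an illustrative (hypothetical) implementation with a trimmed stand-in for types.AuthConfig, an in-memory map works; the real code ships file-backed and native-keychain stores instead:

package main

import "fmt"

// AuthConfig is a trimmed stand-in for engine-api's types.AuthConfig.
type AuthConfig struct {
	Username, Password, ServerAddress string
}

// memStore keeps credentials in a map, keyed by server address.
type memStore map[string]AuthConfig

func (m memStore) Erase(server string) error              { delete(m, server); return nil }
func (m memStore) Get(server string) (AuthConfig, error)  { return m[server], nil }
func (m memStore) GetAll() (map[string]AuthConfig, error) { return m, nil }
func (m memStore) Store(a AuthConfig) error               { m[a.ServerAddress] = a; return nil }

func main() {
	s := memStore{}
	_ = s.Store(AuthConfig{Username: "u", ServerAddress: "registry.example.com"})
	fmt.Println(s.Get("registry.example.com"))
}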
+func (c *fileStore) Get(serverAddress string) (types.AuthConfig, error) { + authConfig, ok := c.file.AuthConfigs[serverAddress] + if !ok { + // Maybe they have a legacy config file, we will iterate the keys converting + // them to the new format and testing + for registry, ac := range c.file.AuthConfigs { + if serverAddress == convertToHostname(registry) { + return ac, nil + } + } + + authConfig = types.AuthConfig{} + } + return authConfig, nil +} + +func (c *fileStore) GetAll() (map[string]types.AuthConfig, error) { + return c.file.AuthConfigs, nil +} + +// Store saves the given credentials in the file store. +func (c *fileStore) Store(authConfig types.AuthConfig) error { + c.file.AuthConfigs[authConfig.ServerAddress] = authConfig + return c.file.Save() +} + +func convertToHostname(url string) string { + stripped := url + if strings.HasPrefix(url, "http://") { + stripped = strings.Replace(url, "http://", "", 1) + } else if strings.HasPrefix(url, "https://") { + stripped = strings.Replace(url, "https://", "", 1) + } + + nameParts := strings.SplitN(stripped, "/", 2) + + return nameParts[0] +} diff --git a/vendor/github.com/docker/docker/cliconfig/credentials/native_store.go b/vendor/github.com/docker/docker/cliconfig/credentials/native_store.go new file mode 100644 index 00000000..9b8997dd --- /dev/null +++ b/vendor/github.com/docker/docker/cliconfig/credentials/native_store.go @@ -0,0 +1,196 @@ +package credentials + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/cliconfig" + "github.com/docker/engine-api/types" +) + +const ( + remoteCredentialsPrefix = "docker-credential-" + tokenUsername = "" +) + +// Standarize the not found error, so every helper returns +// the same message and docker can handle it properly. +var errCredentialsNotFound = errors.New("credentials not found in native keychain") + +// command is an interface that remote executed commands implement. +type command interface { + Output() ([]byte, error) + Input(in io.Reader) +} + +// credentialsRequest holds information shared between docker and a remote credential store. +type credentialsRequest struct { + ServerURL string + Username string + Secret string +} + +// credentialsGetResponse is the information serialized from a remote store +// when the plugin sends requests to get the user credentials. +type credentialsGetResponse struct { + Username string + Secret string +} + +// nativeStore implements a credentials store +// using native keychain to keep credentials secure. +// It piggybacks into a file store to keep users' emails. +type nativeStore struct { + commandFn func(args ...string) command + fileStore Store +} + +// NewNativeStore creates a new native store that +// uses a remote helper program to manage credentials. +func NewNativeStore(file *cliconfig.ConfigFile) Store { + return &nativeStore{ + commandFn: shellCommandFn(file.CredentialsStore), + fileStore: NewFileStore(file), + } +} + +// Erase removes the given credentials from the native store. +func (c *nativeStore) Erase(serverAddress string) error { + if err := c.eraseCredentialsFromStore(serverAddress); err != nil { + return err + } + + // Fallback to plain text store to remove email + return c.fileStore.Erase(serverAddress) +} + +// Get retrieves credentials for a specific server from the native store. +func (c *nativeStore) Get(serverAddress string) (types.AuthConfig, error) { + // load user email if it exist or an empty auth config. 
+	auth, _ := c.fileStore.Get(serverAddress)
+
+	creds, err := c.getCredentialsFromStore(serverAddress)
+	if err != nil {
+		return auth, err
+	}
+	auth.Username = creds.Username
+	auth.IdentityToken = creds.IdentityToken
+	auth.Password = creds.Password
+
+	return auth, nil
+}
+
+// GetAll retrieves all the credentials from the native store.
+func (c *nativeStore) GetAll() (map[string]types.AuthConfig, error) {
+	auths, _ := c.fileStore.GetAll()
+
+	for s, ac := range auths {
+		creds, _ := c.getCredentialsFromStore(s)
+		ac.Username = creds.Username
+		ac.Password = creds.Password
+		ac.IdentityToken = creds.IdentityToken
+		auths[s] = ac
+	}
+
+	return auths, nil
+}
+
+// Store saves the given credentials in the native store, keeping only the
+// email in the plain-text file store.
+func (c *nativeStore) Store(authConfig types.AuthConfig) error {
+	if err := c.storeCredentialsInStore(authConfig); err != nil {
+		return err
+	}
+	authConfig.Username = ""
+	authConfig.Password = ""
+	authConfig.IdentityToken = ""
+
+	// Fall back to the old plain-text credential store to save only the email
+	return c.fileStore.Store(authConfig)
+}
+
+// storeCredentialsInStore executes the command to store the credentials in the native store.
+func (c *nativeStore) storeCredentialsInStore(config types.AuthConfig) error {
+	cmd := c.commandFn("store")
+	creds := &credentialsRequest{
+		ServerURL: config.ServerAddress,
+		Username:  config.Username,
+		Secret:    config.Password,
+	}
+
+	if config.IdentityToken != "" {
+		creds.Username = tokenUsername
+		creds.Secret = config.IdentityToken
+	}
+
+	buffer := new(bytes.Buffer)
+	if err := json.NewEncoder(buffer).Encode(creds); err != nil {
+		return err
+	}
+	cmd.Input(buffer)
+
+	out, err := cmd.Output()
+	if err != nil {
+		t := strings.TrimSpace(string(out))
+		logrus.Debugf("error adding credentials - err: %v, out: `%s`", err, t)
+		return fmt.Errorf(t)
+	}
+
+	return nil
+}
+
+// getCredentialsFromStore executes the command to get the credentials from the native store.
+func (c *nativeStore) getCredentialsFromStore(serverAddress string) (types.AuthConfig, error) {
+	var ret types.AuthConfig
+
+	cmd := c.commandFn("get")
+	cmd.Input(strings.NewReader(serverAddress))
+
+	out, err := cmd.Output()
+	if err != nil {
+		t := strings.TrimSpace(string(out))
+
+		// do not return an error if the credentials are not
+		// in the keychain. Let docker ask for new credentials.
+		if t == errCredentialsNotFound.Error() {
+			return ret, nil
+		}
+
+		logrus.Debugf("error getting credentials - err: %v, out: `%s`", err, t)
+		return ret, fmt.Errorf(t)
+	}
+
+	var resp credentialsGetResponse
+	if err := json.NewDecoder(bytes.NewReader(out)).Decode(&resp); err != nil {
+		return ret, err
+	}
+
+	if resp.Username == tokenUsername {
+		ret.IdentityToken = resp.Secret
+	} else {
+		ret.Password = resp.Secret
+		ret.Username = resp.Username
+	}
+
+	ret.ServerAddress = serverAddress
+	return ret, nil
+}
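The helper protocol used by these methods is simple: the docker-credential-<store> binary receives the server URL on stdin for "get"/"erase" (or a JSON payload for "store") and prints JSON on stdout. A hedged sketch of the "get" side using the standard library's os/exec (the vendored code routes through a subreaper exec wrapper instead); it compiles as-is but at runtime needs a real helper such as docker-credential-osxkeychain on PATH:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"os/exec"
	"strings"
)

// getResponse mirrors the JSON shape a credential helper prints for "get".
type getResponse struct {
	Username string
	Secret   string
}

// getCreds runs `<helper> get` with the server URL on stdin and decodes
// the JSON reply from stdout.
func getCreds(helper, serverURL string) (getResponse, error) {
	var resp getResponse
	cmd := exec.Command(helper, "get")
	cmd.Stdin = strings.NewReader(serverURL)
	out, err := cmd.Output()
	if err != nil {
		return resp, fmt.Errorf("helper failed: %v (%s)", err, bytes.TrimSpace(out))
	}
	err = json.Unmarshal(out, &resp)
	return resp, err
}

func main() {
	resp, err := getCreds("docker-credential-osxkeychain", "https://index.docker.io/v1/")
	fmt.Println(resp, err)
}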
+
+// eraseCredentialsFromStore executes the command to remove the server credentials from the native store.
+func (c *nativeStore) eraseCredentialsFromStore(serverURL string) error {
+	cmd := c.commandFn("erase")
+	cmd.Input(strings.NewReader(serverURL))
+
+	out, err := cmd.Output()
+	if err != nil {
+		t := strings.TrimSpace(string(out))
+		logrus.Debugf("error erasing credentials - err: %v, out: `%s`", err, t)
+		return fmt.Errorf(t)
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/docker/docker/cliconfig/credentials/shell_command.go b/vendor/github.com/docker/docker/cliconfig/credentials/shell_command.go
new file mode 100644
index 00000000..b2afde63
--- /dev/null
+++ b/vendor/github.com/docker/docker/cliconfig/credentials/shell_command.go
@@ -0,0 +1,28 @@
+package credentials
+
+import (
+	"io"
+	"github.com/docker/containerd/subreaper/exec"
+)
+
+func shellCommandFn(storeName string) func(args ...string) command {
+	name := remoteCredentialsPrefix + storeName
+	return func(args ...string) command {
+		return &shell{cmd: exec.Command(name, args...)}
+	}
+}
+
+// shell invokes shell commands to talk with a remote credentials helper.
+type shell struct {
+	cmd *exec.Cmd
+}
+
+// Output returns responses from the remote credentials helper.
+func (s *shell) Output() ([]byte, error) {
+	return s.cmd.Output()
+}
+
+// Input sets the input to send to a remote credentials helper.
+func (s *shell) Input(in io.Reader) {
+	s.cmd.Stdin = in
+}
diff --git a/vendor/github.com/docker/docker/container/archive.go b/vendor/github.com/docker/docker/container/archive.go
new file mode 100644
index 00000000..95b68285
--- /dev/null
+++ b/vendor/github.com/docker/docker/container/archive.go
@@ -0,0 +1,69 @@
+package container
+
+import (
+	"os"
+	"path/filepath"
+
+	"github.com/docker/docker/pkg/archive"
+	"github.com/docker/engine-api/types"
+)
+
+// ResolvePath resolves the given path in the container to a resource on the
+// host. Returns a resolved path (absolute path to the resource on the host),
+// the absolute path to the resource relative to the container's rootfs, and
+// an error if the path points outside the container's rootfs.
+func (container *Container) ResolvePath(path string) (resolvedPath, absPath string, err error) {
+	// Consider the given path as an absolute path in the container.
+	absPath = archive.PreserveTrailingDotOrSeparator(filepath.Join(string(filepath.Separator), path), path)
+
+	// Split the absPath into its Directory and Base components. We will
+	// resolve the dir in the scope of the container then append the base.
+	dirPath, basePath := filepath.Split(absPath)
+
+	resolvedDirPath, err := container.GetResourcePath(dirPath)
+	if err != nil {
+		return "", "", err
+	}
+
+	// resolvedDirPath will have been cleaned (no trailing path separators) so
+	// we can manually join it with the base path element.
+	resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath
+
+	return resolvedPath, absPath, nil
+}
+
+// StatPath stats the filesystem resource at the given resolved path. Locks
+// and mounts should be acquired before calling this method, and the given
+// path should be fully resolved to a path on the host corresponding to the
+// given absolute path inside the container.
+func (container *Container) StatPath(resolvedPath, absPath string) (stat *types.ContainerPathStat, err error) {
+	lstat, err := os.Lstat(resolvedPath)
+	if err != nil {
+		return nil, err
+	}
+
+	var linkTarget string
+	if lstat.Mode()&os.ModeSymlink != 0 {
+		// Fully evaluate the symlink in the scope of the container rootfs.
+ hostPath, err := container.GetResourcePath(absPath) + if err != nil { + return nil, err + } + + linkTarget, err = filepath.Rel(container.BaseFS, hostPath) + if err != nil { + return nil, err + } + + // Make it an absolute path. + linkTarget = filepath.Join(string(filepath.Separator), linkTarget) + } + + return &types.ContainerPathStat{ + Name: filepath.Base(absPath), + Size: lstat.Size(), + Mode: lstat.Mode(), + Mtime: lstat.ModTime(), + LinkTarget: linkTarget, + }, nil +} diff --git a/vendor/github.com/docker/docker/container/container.go b/vendor/github.com/docker/docker/container/container.go new file mode 100644 index 00000000..9bdd112d --- /dev/null +++ b/vendor/github.com/docker/docker/container/container.go @@ -0,0 +1,649 @@ +package container + +import ( + "encoding/json" + "fmt" + "io" + "os" + "path/filepath" + "sync" + "syscall" + "time" + + "golang.org/x/net/context" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/exec" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/daemon/logger/jsonfilelog" + "github.com/docker/docker/daemon/network" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/promise" + "github.com/docker/docker/pkg/signal" + "github.com/docker/docker/pkg/symlink" + "github.com/docker/docker/restartmanager" + "github.com/docker/docker/runconfig" + "github.com/docker/docker/volume" + containertypes "github.com/docker/engine-api/types/container" + "github.com/opencontainers/runc/libcontainer/label" +) + +const configFileName = "config.v2.json" + +var ( + errInvalidEndpoint = fmt.Errorf("invalid endpoint while building port map info") + errInvalidNetwork = fmt.Errorf("invalid network settings while building port map info") +) + +// CommonContainer holds the fields for a container which are +// applicable across all platforms supported by the daemon. +type CommonContainer struct { + *runconfig.StreamConfig + // embed for Container to support states directly. + *State `json:"State"` // Needed for remote api version <= 1.11 + Root string `json:"-"` // Path to the "home" of the container, including metadata. + BaseFS string `json:"-"` // Path to the graphdriver mountpoint + RWLayer layer.RWLayer `json:"-"` + ID string + Created time.Time + Path string + Args []string + Config *containertypes.Config + ImageID image.ID `json:"Image"` + NetworkSettings *network.Settings + LogPath string + Name string + Driver string + // MountLabel contains the options for the 'mount' command + MountLabel string + ProcessLabel string + RestartCount int + HasBeenStartedBefore bool + HasBeenManuallyStopped bool // used for unless-stopped restart policy + MountPoints map[string]*volume.MountPoint + HostConfig *containertypes.HostConfig `json:"-"` // do not serialize the host config in the json, otherwise we'll make the container unportable + ExecCommands *exec.Store `json:"-"` + // logDriver for closing + LogDriver logger.Logger `json:"-"` + LogCopier *logger.Copier `json:"-"` + restartManager restartmanager.RestartManager + attachContext *attachContext +} + +// NewBaseContainer creates a new container with its +// basic configuration. 
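The path-scoping idea behind GetResourcePath and GetRootResourcePath can be illustrated without the symlink machinery. A simplified sketch, assuming an illustrative resolveInScope helper: joining the path to "/" first strips any ".." components, so a traversal attempt is contained under the root. Note the vendored code additionally resolves symlinks via symlink.FollowSymlinkInScope, which this sketch omits.

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// resolveInScope maps a container-relative path onto the host rootfs.
func resolveInScope(root, path string) (string, error) {
	// Forcing the path absolute before joining cleans away ".." segments.
	clean := filepath.Join(string(filepath.Separator), path)
	resolved := filepath.Join(root, clean)
	// Defense-in-depth: verify containment explicitly.
	if resolved != root && !strings.HasPrefix(resolved, root+string(filepath.Separator)) {
		return "", fmt.Errorf("path %q escapes rootfs %q", path, root)
	}
	return resolved, nil
}

func main() {
	// The traversal attempt resolves safely inside the rootfs.
	fmt.Println(resolveInScope("/var/lib/docker/rootfs", "../../etc/passwd"))
}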
+func NewBaseContainer(id, root string) *Container { + return &Container{ + CommonContainer: CommonContainer{ + ID: id, + State: NewState(), + ExecCommands: exec.NewStore(), + Root: root, + MountPoints: make(map[string]*volume.MountPoint), + StreamConfig: runconfig.NewStreamConfig(), + attachContext: &attachContext{}, + }, + } +} + +// FromDisk loads the container configuration stored in the host. +func (container *Container) FromDisk() error { + pth, err := container.ConfigPath() + if err != nil { + return err + } + + jsonSource, err := os.Open(pth) + if err != nil { + return err + } + defer jsonSource.Close() + + dec := json.NewDecoder(jsonSource) + + // Load container settings + if err := dec.Decode(container); err != nil { + return err + } + + if err := label.ReserveLabel(container.ProcessLabel); err != nil { + return err + } + return container.readHostConfig() +} + +// ToDisk saves the container configuration on disk. +func (container *Container) ToDisk() error { + pth, err := container.ConfigPath() + if err != nil { + return err + } + + jsonSource, err := os.Create(pth) + if err != nil { + return err + } + defer jsonSource.Close() + + enc := json.NewEncoder(jsonSource) + + // Save container settings + if err := enc.Encode(container); err != nil { + return err + } + + return container.WriteHostConfig() +} + +// ToDiskLocking saves the container configuration on disk in a thread safe way. +func (container *Container) ToDiskLocking() error { + container.Lock() + err := container.ToDisk() + container.Unlock() + return err +} + +// readHostConfig reads the host configuration from disk for the container. +func (container *Container) readHostConfig() error { + container.HostConfig = &containertypes.HostConfig{} + // If the hostconfig file does not exist, do not read it. + // (We still have to initialize container.HostConfig, + // but that's OK, since we just did that above.) + pth, err := container.HostConfigPath() + if err != nil { + return err + } + + f, err := os.Open(pth) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + defer f.Close() + + if err := json.NewDecoder(f).Decode(&container.HostConfig); err != nil { + return err + } + + container.InitDNSHostConfig() + + return nil +} + +// WriteHostConfig saves the host configuration on disk for the container. +func (container *Container) WriteHostConfig() error { + pth, err := container.HostConfigPath() + if err != nil { + return err + } + + f, err := os.Create(pth) + if err != nil { + return err + } + defer f.Close() + + return json.NewEncoder(f).Encode(&container.HostConfig) +} + +// SetupWorkingDirectory sets up the container's working directory as set in container.Config.WorkingDir +func (container *Container) SetupWorkingDirectory(rootUID, rootGID int) error { + if container.Config.WorkingDir == "" { + return nil + } + + // If can't mount container FS at this point (eg Hyper-V Containers on + // Windows) bail out now with no action. 
+ if !container.canMountFS() { + return nil + } + + container.Config.WorkingDir = filepath.Clean(container.Config.WorkingDir) + + pth, err := container.GetResourcePath(container.Config.WorkingDir) + if err != nil { + return err + } + + if err := idtools.MkdirAllNewAs(pth, 0755, rootUID, rootGID); err != nil { + pthInfo, err2 := os.Stat(pth) + if err2 == nil && pthInfo != nil && !pthInfo.IsDir() { + return fmt.Errorf("Cannot mkdir: %s is not a directory", container.Config.WorkingDir) + } + + return err + } + + return nil +} + +// GetResourcePath evaluates `path` in the scope of the container's BaseFS, with proper path +// sanitisation. Symlinks are all scoped to the BaseFS of the container, as +// though the container's BaseFS was `/`. +// +// The BaseFS of a container is the host-facing path which is bind-mounted as +// `/` inside the container. This method is essentially used to access a +// particular path inside the container as though you were a process in that +// container. +// +// NOTE: The returned path is *only* safely scoped inside the container's BaseFS +// if no component of the returned path changes (such as a component +// symlinking to a different path) between using this method and using the +// path. See symlink.FollowSymlinkInScope for more details. +func (container *Container) GetResourcePath(path string) (string, error) { + // IMPORTANT - These are paths on the OS where the daemon is running, hence + // any filepath operations must be done in an OS agnostic way. + + cleanPath := cleanResourcePath(path) + r, e := symlink.FollowSymlinkInScope(filepath.Join(container.BaseFS, cleanPath), container.BaseFS) + return r, e +} + +// GetRootResourcePath evaluates `path` in the scope of the container's root, with proper path +// sanitisation. Symlinks are all scoped to the root of the container, as +// though the container's root was `/`. +// +// The root of a container is the host-facing configuration metadata directory. +// Only use this method to safely access the container's `container.json` or +// other metadata files. If in doubt, use container.GetResourcePath. +// +// NOTE: The returned path is *only* safely scoped inside the container's root +// if no component of the returned path changes (such as a component +// symlinking to a different path) between using this method and using the +// path. See symlink.FollowSymlinkInScope for more details. +func (container *Container) GetRootResourcePath(path string) (string, error) { + // IMPORTANT - These are paths on the OS where the daemon is running, hence + // any filepath operations must be done in an OS agnostic way. + cleanPath := filepath.Join(string(os.PathSeparator), path) + return symlink.FollowSymlinkInScope(filepath.Join(container.Root, cleanPath), container.Root) +} + +// ExitOnNext signals to the monitor that it should not restart the container +// after we send the kill signal. +func (container *Container) ExitOnNext() { + if container.restartManager != nil { + container.restartManager.Cancel() + } +} + +// HostConfigPath returns the path to the container's JSON hostconfig +func (container *Container) HostConfigPath() (string, error) { + return container.GetRootResourcePath("hostconfig.json") +} + +// ConfigPath returns the path to the container's JSON config +func (container *Container) ConfigPath() (string, error) { + return container.GetRootResourcePath(configFileName) +} + +// StartLogger starts a new logger driver for the container. 
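+// Sketch of a typical call (config values are illustrative):
+//
+//	l, err := container.StartLogger(containertypes.LogConfig{
+//		Type:   jsonfilelog.Name,
+//		Config: map[string]string{"max-size": "10m"},
+//	})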
+func (container *Container) StartLogger(cfg containertypes.LogConfig) (logger.Logger, error) { + c, err := logger.GetLogDriver(cfg.Type) + if err != nil { + return nil, fmt.Errorf("Failed to get logging factory: %v", err) + } + ctx := logger.Context{ + Config: cfg.Config, + ContainerID: container.ID, + ContainerName: container.Name, + ContainerEntrypoint: container.Path, + ContainerArgs: container.Args, + ContainerImageID: container.ImageID.String(), + ContainerImageName: container.Config.Image, + ContainerCreated: container.Created, + ContainerEnv: container.Config.Env, + ContainerLabels: container.Config.Labels, + } + + // Set logging file for "json-logger" + if cfg.Type == jsonfilelog.Name { + ctx.LogPath, err = container.GetRootResourcePath(fmt.Sprintf("%s-json.log", container.ID)) + if err != nil { + return nil, err + } + } + return c(ctx) +} + +// GetProcessLabel returns the process label for the container. +func (container *Container) GetProcessLabel() string { + // even if we have a process label return "" if we are running + // in privileged mode + if container.HostConfig.Privileged { + return "" + } + return container.ProcessLabel +} + +// GetMountLabel returns the mounting label for the container. +// This label is empty if the container is privileged. +func (container *Container) GetMountLabel() string { + if container.HostConfig.Privileged { + return "" + } + return container.MountLabel +} + +// GetExecIDs returns the list of exec commands running on the container. +func (container *Container) GetExecIDs() []string { + return container.ExecCommands.List() +} + +// Attach connects to the container's TTY, delegating to standard +// streams or websockets depending on the configuration. +func (container *Container) Attach(stdin io.ReadCloser, stdout io.Writer, stderr io.Writer, keys []byte) chan error { + ctx := container.InitAttachContext() + return AttachStreams(ctx, container.StreamConfig, container.Config.OpenStdin, container.Config.StdinOnce, container.Config.Tty, stdin, stdout, stderr, keys) +} + +// AttachStreams connects streams to a TTY. +// Used by exec too. Should this move somewhere else? +func AttachStreams(ctx context.Context, streamConfig *runconfig.StreamConfig, openStdin, stdinOnce, tty bool, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer, keys []byte) chan error { + var ( + cStdout, cStderr io.ReadCloser + cStdin io.WriteCloser + wg sync.WaitGroup + errors = make(chan error, 3) + ) + + if stdin != nil && openStdin { + cStdin = streamConfig.StdinPipe() + wg.Add(1) + } + + if stdout != nil { + cStdout = streamConfig.StdoutPipe() + wg.Add(1) + } + + if stderr != nil { + cStderr = streamConfig.StderrPipe() + wg.Add(1) + } + + // Connect stdin of container to the http conn. 
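+	// The stdin goroutine below and the two attachStream goroutines each call
+	// wg.Done; errors is buffered to 3 above so that no copier blocks on send
+	// while the promise at the end of this function drains the results.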
+ go func() { + if stdin == nil || !openStdin { + return + } + logrus.Debugf("attach: stdin: begin") + + var err error + if tty { + _, err = copyEscapable(cStdin, stdin, keys) + } else { + _, err = io.Copy(cStdin, stdin) + + } + if err == io.ErrClosedPipe { + err = nil + } + if err != nil { + logrus.Errorf("attach: stdin: %s", err) + errors <- err + } + if stdinOnce && !tty { + cStdin.Close() + } else { + // No matter what, when stdin is closed (io.Copy unblock), close stdout and stderr + if cStdout != nil { + cStdout.Close() + } + if cStderr != nil { + cStderr.Close() + } + } + logrus.Debugf("attach: stdin: end") + wg.Done() + }() + + attachStream := func(name string, stream io.Writer, streamPipe io.ReadCloser) { + if stream == nil { + return + } + + logrus.Debugf("attach: %s: begin", name) + _, err := io.Copy(stream, streamPipe) + if err == io.ErrClosedPipe { + err = nil + } + if err != nil { + logrus.Errorf("attach: %s: %v", name, err) + errors <- err + } + // Make sure stdin gets closed + if stdin != nil { + stdin.Close() + } + streamPipe.Close() + logrus.Debugf("attach: %s: end", name) + wg.Done() + } + + go attachStream("stdout", stdout, cStdout) + go attachStream("stderr", stderr, cStderr) + + return promise.Go(func() error { + done := make(chan struct{}) + go func() { + wg.Wait() + close(done) + }() + select { + case <-done: + case <-ctx.Done(): + // close all pipes + if cStdin != nil { + cStdin.Close() + } + if cStdout != nil { + cStdout.Close() + } + if cStderr != nil { + cStderr.Close() + } + <-done + } + close(errors) + for err := range errors { + if err != nil { + return err + } + } + return nil + }) +} + +// Code c/c from io.Copy() modified to handle escape sequence +func copyEscapable(dst io.Writer, src io.ReadCloser, keys []byte) (written int64, err error) { + if len(keys) == 0 { + // Default keys : ctrl-p ctrl-q + keys = []byte{16, 17} + } + buf := make([]byte, 32*1024) + for { + nr, er := src.Read(buf) + if nr > 0 { + // ---- Docker addition + for i, key := range keys { + if nr != 1 || buf[0] != key { + break + } + if i == len(keys)-1 { + if err := src.Close(); err != nil { + return 0, err + } + return 0, nil + } + nr, er = src.Read(buf) + } + // ---- End of docker + nw, ew := dst.Write(buf[0:nr]) + if nw > 0 { + written += int64(nw) + } + if ew != nil { + err = ew + break + } + if nr != nw { + err = io.ErrShortWrite + break + } + } + if er == io.EOF { + break + } + if er != nil { + err = er + break + } + } + return written, err +} + +// ShouldRestartOnBoot decides whether the daemon should restart the container or not. +// This is based on the container's restart policy. +func (container *Container) ShouldRestartOnBoot() bool { + return container.HostConfig.RestartPolicy.Name == "always" || + (container.HostConfig.RestartPolicy.Name == "unless-stopped" && !container.HasBeenManuallyStopped) || + (container.HostConfig.RestartPolicy.Name == "on-failure" && container.ExitCode != 0) +} + +// AddBindMountPoint adds a new bind mount point configuration to the container. +func (container *Container) AddBindMountPoint(name, source, destination string, rw bool) { + container.MountPoints[destination] = &volume.MountPoint{ + Name: name, + Source: source, + Destination: destination, + RW: rw, + } +} + +// AddLocalMountPoint adds a new local mount point configuration to the container. 
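+// Sketch (name and path are illustrative):
+//
+//	container.AddLocalMountPoint("myvol", "/data", true)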
+func (container *Container) AddLocalMountPoint(name, destination string, rw bool) {
+	container.MountPoints[destination] = &volume.MountPoint{
+		Name:        name,
+		Driver:      volume.DefaultDriverName,
+		Destination: destination,
+		RW:          rw,
+	}
+}
+
+// AddMountPointWithVolume adds a new mount point configured with a volume to the container.
+func (container *Container) AddMountPointWithVolume(destination string, vol volume.Volume, rw bool) {
+	container.MountPoints[destination] = &volume.MountPoint{
+		Name:        vol.Name(),
+		Driver:      vol.DriverName(),
+		Destination: destination,
+		RW:          rw,
+		Volume:      vol,
+		CopyData:    volume.DefaultCopyMode,
+	}
+}
+
+// IsDestinationMounted checks whether a path is mounted on the container or not.
+func (container *Container) IsDestinationMounted(destination string) bool {
+	return container.MountPoints[destination] != nil
+}
+
+// StopSignal returns the signal used to stop the container.
+func (container *Container) StopSignal() int {
+	var stopSignal syscall.Signal
+	if container.Config.StopSignal != "" {
+		stopSignal, _ = signal.ParseSignal(container.Config.StopSignal)
+	}
+
+	if int(stopSignal) == 0 {
+		stopSignal, _ = signal.ParseSignal(signal.DefaultStopSignal)
+	}
+	return int(stopSignal)
+}
+
+// InitDNSHostConfig ensures that the DNS fields are never nil.
+// New containers don't ever have those fields nil,
+// but pre-created containers can still have those nil values.
+// The non-recommended host configuration in the start api can
+// make these fields nil again; this corrects that issue until
+// we remove that behavior for good.
+// See https://github.com/docker/docker/pull/17779
+// for a more detailed explanation on why we don't want that.
+func (container *Container) InitDNSHostConfig() {
+	container.Lock()
+	defer container.Unlock()
+	if container.HostConfig.DNS == nil {
+		container.HostConfig.DNS = make([]string, 0)
+	}
+
+	if container.HostConfig.DNSSearch == nil {
+		container.HostConfig.DNSSearch = make([]string, 0)
+	}
+
+	if container.HostConfig.DNSOptions == nil {
+		container.HostConfig.DNSOptions = make([]string, 0)
+	}
+}
+
+// UpdateMonitor updates the monitor configuration for a running container.
+func (container *Container) UpdateMonitor(restartPolicy containertypes.RestartPolicy) {
+	type policySetter interface {
+		SetPolicy(containertypes.RestartPolicy)
+	}
+
+	if rm, ok := container.RestartManager(false).(policySetter); ok {
+		rm.SetPolicy(restartPolicy)
+	}
+}
+
+// FullHostname returns the hostname with the optional domain name appended to it.
+func (container *Container) FullHostname() string {
+	fullHostname := container.Config.Hostname
+	if container.Config.Domainname != "" {
+		fullHostname = fmt.Sprintf("%s.%s", fullHostname, container.Config.Domainname)
+	}
+	return fullHostname
+}
+
+// RestartManager returns the current restartmanager instance connected to the container.
+func (container *Container) RestartManager(reset bool) restartmanager.RestartManager {
+	if reset {
+		container.RestartCount = 0
+		container.restartManager = nil
+	}
+	if container.restartManager == nil {
+		container.restartManager = restartmanager.New(container.HostConfig.RestartPolicy)
+	}
+	return container.restartManager
+}
+
+type attachContext struct {
+	ctx    context.Context
+	cancel context.CancelFunc
+	mu     sync.Mutex
+}
+
+// InitAttachContext initializes or returns the existing context for attach
+// calls to track container liveness.
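+// Sketch (illustrative): every concurrent attach shares the one cancellable
+// context, so CancelAttachContext detaches all attached streams at once:
+//
+//	ctx := container.InitAttachContext()
+//	go func() {
+//		<-ctx.Done() // fires when CancelAttachContext is called
+//	}()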
+func (container *Container) InitAttachContext() context.Context { + container.attachContext.mu.Lock() + defer container.attachContext.mu.Unlock() + if container.attachContext.ctx == nil { + container.attachContext.ctx, container.attachContext.cancel = context.WithCancel(context.Background()) + } + return container.attachContext.ctx +} + +// CancelAttachContext cancel attach context. All attach calls should detach +// after this call. +func (container *Container) CancelAttachContext() { + container.attachContext.mu.Lock() + if container.attachContext.ctx != nil { + container.attachContext.cancel() + container.attachContext.ctx = nil + } + container.attachContext.mu.Unlock() +} diff --git a/vendor/github.com/docker/docker/container/container_unix.go b/vendor/github.com/docker/docker/container/container_unix.go new file mode 100644 index 00000000..754090f9 --- /dev/null +++ b/vendor/github.com/docker/docker/container/container_unix.go @@ -0,0 +1,405 @@ +// +build linux freebsd + +package container + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strings" + "syscall" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/symlink" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/utils" + "github.com/docker/docker/volume" + containertypes "github.com/docker/engine-api/types/container" + "github.com/opencontainers/runc/libcontainer/label" +) + +// DefaultSHMSize is the default size (64MB) of the SHM which will be mounted in the container +const DefaultSHMSize int64 = 67108864 + +// Container holds the fields specific to unixen implementations. +// See CommonContainer for standard fields common to all containers. +type Container struct { + CommonContainer + + // Fields below here are platform specific. + AppArmorProfile string + HostnamePath string + HostsPath string + ShmPath string + ResolvConfPath string + SeccompProfile string + NoNewPrivileges bool +} + +// ExitStatus provides exit reasons for a container. +type ExitStatus struct { + // The exit code with which the container exited. + ExitCode int + + // Whether the container encountered an OOM. + OOMKilled bool +} + +// CreateDaemonEnvironment returns the list of all environment variables given the list of +// environment variables related to links. +// Sets PATH, HOSTNAME and if container.Config.Tty is set: TERM. +// The defaults set here do not override the values in container.Config.Env +func (container *Container) CreateDaemonEnvironment(linkedEnv []string) []string { + // Setup environment + env := []string{ + "PATH=" + system.DefaultPathEnv, + "HOSTNAME=" + container.Config.Hostname, + } + if container.Config.Tty { + env = append(env, "TERM=xterm") + } + env = append(env, linkedEnv...) + // because the env on the container can override certain default values + // we need to replace the 'env' keys where they match and append anything + // else. 
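+	// e.g. (illustrative): a "PATH=/opt/bin" entry in Config.Env replaces the
+	// default PATH entry built above, while keys not already present are
+	// appended unchanged.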
+ env = utils.ReplaceOrAppendEnvValues(env, container.Config.Env) + return env +} + +// TrySetNetworkMount attempts to set the network mounts given a provided destination and +// the path to use for it; return true if the given destination was a network mount file +func (container *Container) TrySetNetworkMount(destination string, path string) bool { + if destination == "/etc/resolv.conf" { + container.ResolvConfPath = path + return true + } + if destination == "/etc/hostname" { + container.HostnamePath = path + return true + } + if destination == "/etc/hosts" { + container.HostsPath = path + return true + } + + return false +} + +// BuildHostnameFile writes the container's hostname file. +func (container *Container) BuildHostnameFile() error { + hostnamePath, err := container.GetRootResourcePath("hostname") + if err != nil { + return err + } + container.HostnamePath = hostnamePath + return ioutil.WriteFile(container.HostnamePath, []byte(container.Config.Hostname+"\n"), 0644) +} + +// appendNetworkMounts appends any network mounts to the array of mount points passed in +func appendNetworkMounts(container *Container, volumeMounts []volume.MountPoint) ([]volume.MountPoint, error) { + for _, mnt := range container.NetworkMounts() { + dest, err := container.GetResourcePath(mnt.Destination) + if err != nil { + return nil, err + } + volumeMounts = append(volumeMounts, volume.MountPoint{Destination: dest}) + } + return volumeMounts, nil +} + +// NetworkMounts returns the list of network mounts. +func (container *Container) NetworkMounts() []Mount { + var mounts []Mount + shared := container.HostConfig.NetworkMode.IsContainer() + if container.ResolvConfPath != "" { + if _, err := os.Stat(container.ResolvConfPath); err != nil { + logrus.Warnf("ResolvConfPath set to %q, but can't stat this filename (err = %v); skipping", container.ResolvConfPath, err) + } else { + label.Relabel(container.ResolvConfPath, container.MountLabel, shared) + writable := !container.HostConfig.ReadonlyRootfs + if m, exists := container.MountPoints["/etc/resolv.conf"]; exists { + writable = m.RW + } + mounts = append(mounts, Mount{ + Source: container.ResolvConfPath, + Destination: "/etc/resolv.conf", + Writable: writable, + Propagation: volume.DefaultPropagationMode, + }) + } + } + if container.HostnamePath != "" { + if _, err := os.Stat(container.HostnamePath); err != nil { + logrus.Warnf("HostnamePath set to %q, but can't stat this filename (err = %v); skipping", container.HostnamePath, err) + } else { + label.Relabel(container.HostnamePath, container.MountLabel, shared) + writable := !container.HostConfig.ReadonlyRootfs + if m, exists := container.MountPoints["/etc/hostname"]; exists { + writable = m.RW + } + mounts = append(mounts, Mount{ + Source: container.HostnamePath, + Destination: "/etc/hostname", + Writable: writable, + Propagation: volume.DefaultPropagationMode, + }) + } + } + if container.HostsPath != "" { + if _, err := os.Stat(container.HostsPath); err != nil { + logrus.Warnf("HostsPath set to %q, but can't stat this filename (err = %v); skipping", container.HostsPath, err) + } else { + label.Relabel(container.HostsPath, container.MountLabel, shared) + writable := !container.HostConfig.ReadonlyRootfs + if m, exists := container.MountPoints["/etc/hosts"]; exists { + writable = m.RW + } + mounts = append(mounts, Mount{ + Source: container.HostsPath, + Destination: "/etc/hosts", + Writable: writable, + Propagation: volume.DefaultPropagationMode, + }) + } + } + return mounts +} + +// CopyImagePathContent copies 
the files at the image's destination path into the volume.
+func (container *Container) CopyImagePathContent(v volume.Volume, destination string) error {
+	rootfs, err := symlink.FollowSymlinkInScope(filepath.Join(container.BaseFS, destination), container.BaseFS)
+	if err != nil {
+		return err
+	}
+
+	if _, err = ioutil.ReadDir(rootfs); err != nil {
+		if os.IsNotExist(err) {
+			return nil
+		}
+		return err
+	}
+
+	path, err := v.Mount()
+	if err != nil {
+		return err
+	}
+	defer v.Unmount()
+	return copyExistingContents(rootfs, path)
+}
+
+// ShmResourcePath returns the path to shm
+func (container *Container) ShmResourcePath() (string, error) {
+	return container.GetRootResourcePath("shm")
+}
+
+// HasMountFor checks if path is a mountpoint
+func (container *Container) HasMountFor(path string) bool {
+	_, exists := container.MountPoints[path]
+	return exists
+}
+
+// UnmountIpcMounts uses the provided unmount function to unmount shm and mqueue if they were mounted
+func (container *Container) UnmountIpcMounts(unmount func(pth string) error) {
+	if container.HostConfig.IpcMode.IsContainer() || container.HostConfig.IpcMode.IsHost() {
+		return
+	}
+
+	var warnings []string
+
+	if !container.HasMountFor("/dev/shm") {
+		shmPath, err := container.ShmResourcePath()
+		if err != nil {
+			logrus.Error(err)
+			warnings = append(warnings, err.Error())
+		} else if shmPath != "" {
+			if err := unmount(shmPath); err != nil {
+				warnings = append(warnings, fmt.Sprintf("failed to umount %s: %v", shmPath, err))
+			}
+
+		}
+	}
+
+	if len(warnings) > 0 {
+		logrus.Warnf("failed to cleanup ipc mounts:\n%v", strings.Join(warnings, "\n"))
+	}
+}
+
+// IpcMounts returns the list of IPC mounts
+func (container *Container) IpcMounts() []Mount {
+	var mounts []Mount
+
+	if !container.HasMountFor("/dev/shm") {
+		label.SetFileLabel(container.ShmPath, container.MountLabel)
+		mounts = append(mounts, Mount{
+			Source:      container.ShmPath,
+			Destination: "/dev/shm",
+			Writable:    true,
+			Propagation: volume.DefaultPropagationMode,
+		})
+	}
+
+	return mounts
+}
+
+// UpdateContainer updates the configuration of a container.
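+// Sketch (values are illustrative): raising the memory limit of a running
+// container:
+//
+//	err := c.UpdateContainer(&containertypes.HostConfig{
+//		Resources: containertypes.Resources{Memory: 512 * 1024 * 1024},
+//	})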
+func (container *Container) UpdateContainer(hostConfig *containertypes.HostConfig) error { + container.Lock() + defer container.Unlock() + + // update resources of container + resources := hostConfig.Resources + cResources := &container.HostConfig.Resources + if resources.BlkioWeight != 0 { + cResources.BlkioWeight = resources.BlkioWeight + } + if resources.CPUShares != 0 { + cResources.CPUShares = resources.CPUShares + } + if resources.CPUPeriod != 0 { + cResources.CPUPeriod = resources.CPUPeriod + } + if resources.CPUQuota != 0 { + cResources.CPUQuota = resources.CPUQuota + } + if resources.CpusetCpus != "" { + cResources.CpusetCpus = resources.CpusetCpus + } + if resources.CpusetMems != "" { + cResources.CpusetMems = resources.CpusetMems + } + if resources.Memory != 0 { + cResources.Memory = resources.Memory + } + if resources.MemorySwap != 0 { + cResources.MemorySwap = resources.MemorySwap + } + if resources.MemoryReservation != 0 { + cResources.MemoryReservation = resources.MemoryReservation + } + if resources.KernelMemory != 0 { + cResources.KernelMemory = resources.KernelMemory + } + + // update HostConfig of container + if hostConfig.RestartPolicy.Name != "" { + container.HostConfig.RestartPolicy = hostConfig.RestartPolicy + } + + if err := container.ToDisk(); err != nil { + logrus.Errorf("Error saving updated container: %v", err) + return err + } + + return nil +} + +func detachMounted(path string) error { + return syscall.Unmount(path, syscall.MNT_DETACH) +} + +// UnmountVolumes unmounts all volumes +func (container *Container) UnmountVolumes(forceSyscall bool, volumeEventLog func(name, action string, attributes map[string]string)) error { + var ( + volumeMounts []volume.MountPoint + err error + ) + + for _, mntPoint := range container.MountPoints { + dest, err := container.GetResourcePath(mntPoint.Destination) + if err != nil { + return err + } + + volumeMounts = append(volumeMounts, volume.MountPoint{Destination: dest, Volume: mntPoint.Volume}) + } + + // Append any network mounts to the list (this is a no-op on Windows) + if volumeMounts, err = appendNetworkMounts(container, volumeMounts); err != nil { + return err + } + + for _, volumeMount := range volumeMounts { + if forceSyscall { + if err := detachMounted(volumeMount.Destination); err != nil { + logrus.Warnf("%s unmountVolumes: Failed to do lazy umount %v", container.ID, err) + } + } + + if volumeMount.Volume != nil { + if err := volumeMount.Volume.Unmount(); err != nil { + return err + } + + attributes := map[string]string{ + "driver": volumeMount.Volume.DriverName(), + "container": container.ID, + } + volumeEventLog(volumeMount.Volume.Name(), "unmount", attributes) + } + } + + return nil +} + +// copyExistingContents copies from the source to the destination and +// ensures the ownership is appropriately set. 
+func copyExistingContents(source, destination string) error {
+	volList, err := ioutil.ReadDir(source)
+	if err != nil {
+		return err
+	}
+	if len(volList) > 0 {
+		destList, err := ioutil.ReadDir(destination)
+		if err != nil {
+			return err
+		}
+		if len(destList) == 0 {
+			// If the destination volume is empty, copy files from the source directory into it
+			if err := chrootarchive.CopyWithTar(source, destination); err != nil {
+				return err
+			}
+		}
+	}
+	return copyOwnership(source, destination)
+}
+
+// copyOwnership copies the permissions and uid:gid of the source file
+// to the destination file
+func copyOwnership(source, destination string) error {
+	stat, err := system.Stat(source)
+	if err != nil {
+		return err
+	}
+
+	if err := os.Chown(destination, int(stat.UID()), int(stat.GID())); err != nil {
+		return err
+	}
+
+	return os.Chmod(destination, os.FileMode(stat.Mode()))
+}
+
+// TmpfsMounts returns the list of tmpfs mounts
+func (container *Container) TmpfsMounts() []Mount {
+	var mounts []Mount
+	for dest, data := range container.HostConfig.Tmpfs {
+		mounts = append(mounts, Mount{
+			Source:      "tmpfs",
+			Destination: dest,
+			Data:        data,
+		})
+	}
+	return mounts
+}
+
+// cleanResourcePath cleans a resource path and prepares it to be combined with the mount path
+func cleanResourcePath(path string) string {
+	return filepath.Join(string(os.PathSeparator), path)
+}
+
+// canMountFS determines if the file system for the container
+// can be mounted locally. A no-op on non-Windows platforms
+func (container *Container) canMountFS() bool {
+	return true
+}
diff --git a/vendor/github.com/docker/docker/container/history.go b/vendor/github.com/docker/docker/container/history.go
new file mode 100644
index 00000000..c80c2aa0
--- /dev/null
+++ b/vendor/github.com/docker/docker/container/history.go
@@ -0,0 +1,30 @@
+package container
+
+import "sort"
+
+// History is a convenience type for storing a list of containers,
+// sorted by creation date in descending order.
+type History []*Container
+
+// Len returns the number of containers in the history.
+func (history *History) Len() int {
+	return len(*history)
+}
+
+// Less compares two containers and returns true if the second one
+// was created before the first one.
+func (history *History) Less(i, j int) bool {
+	containers := *history
+	return containers[j].Created.Before(containers[i].Created)
+}
+
+// Swap switches containers i and j positions in the history.
+func (history *History) Swap(i, j int) {
+	containers := *history
+	containers[i], containers[j] = containers[j], containers[i]
+}
+
+// sort orders the history by creation date in descending order.
+func (history *History) sort() {
+	sort.Sort(history)
+}
diff --git a/vendor/github.com/docker/docker/container/memory_store.go b/vendor/github.com/docker/docker/container/memory_store.go
new file mode 100644
index 00000000..9fa1165d
--- /dev/null
+++ b/vendor/github.com/docker/docker/container/memory_store.go
@@ -0,0 +1,92 @@
+package container
+
+import "sync"
+
+// memoryStore implements a Store in memory.
+type memoryStore struct {
+	s map[string]*Container
+	sync.RWMutex
+}
+
+// NewMemoryStore initializes a new memory store.
+func NewMemoryStore() Store {
+	return &memoryStore{
+		s: make(map[string]*Container),
+	}
+}
+
+// Add appends a new container to the memory store.
+// It overwrites the entry if the id existed before.
+func (c *memoryStore) Add(id string, cont *Container) {
+	c.Lock()
+	c.s[id] = cont
+	c.Unlock()
+}
+
+// Get returns a container from the store by id.
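+// Sketch of typical store usage (id is illustrative):
+//
+//	s := NewMemoryStore()
+//	s.Add("abc123", c)
+//	got := s.Get("abc123") // nil when the id is absent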
+func (c *memoryStore) Get(id string) *Container {
+	c.RLock()
+	res := c.s[id]
+	c.RUnlock()
+	return res
+}
+
+// Delete removes a container from the store by id.
+func (c *memoryStore) Delete(id string) {
+	c.Lock()
+	delete(c.s, id)
+	c.Unlock()
+}
+
+// List returns a sorted list of containers from the store.
+// The containers are ordered by creation date.
+func (c *memoryStore) List() []*Container {
+	containers := History(c.all())
+	containers.sort()
+	return containers
+}
+
+// Size returns the number of containers in the store.
+func (c *memoryStore) Size() int {
+	c.RLock()
+	defer c.RUnlock()
+	return len(c.s)
+}
+
+// First returns the first container found in the store by a given filter.
+func (c *memoryStore) First(filter StoreFilter) *Container {
+	for _, cont := range c.all() {
+		if filter(cont) {
+			return cont
+		}
+	}
+	return nil
+}
+
+// ApplyAll calls the reducer function with every container in the store.
+// The reducers run asynchronously in goroutines; ApplyAll waits for them all.
+// NOTE: Modifications to the store MUST NOT be done by the StoreReducer.
+func (c *memoryStore) ApplyAll(apply StoreReducer) {
+	wg := new(sync.WaitGroup)
+	for _, cont := range c.all() {
+		wg.Add(1)
+		go func(container *Container) {
+			apply(container)
+			wg.Done()
+		}(cont)
+	}
+
+	wg.Wait()
+}
+
+func (c *memoryStore) all() []*Container {
+	c.RLock()
+	containers := make([]*Container, 0, len(c.s))
+	for _, cont := range c.s {
+		containers = append(containers, cont)
+	}
+	c.RUnlock()
+	return containers
+}
+
+var _ Store = &memoryStore{}
diff --git a/vendor/github.com/docker/docker/container/monitor.go b/vendor/github.com/docker/docker/container/monitor.go
new file mode 100644
index 00000000..ba82d875
--- /dev/null
+++ b/vendor/github.com/docker/docker/container/monitor.go
@@ -0,0 +1,60 @@
+package container
+
+import (
+	"time"
+
+	"github.com/Sirupsen/logrus"
+)
+
+const (
+	loggerCloseTimeout = 10 * time.Second
+)
+
+// supervisor defines the interface that a supervisor must implement
+type supervisor interface {
+	// LogContainerEvent generates events related to a given container
+	LogContainerEvent(*Container, string)
+	// Cleanup ensures that the container is properly unmounted
+	Cleanup(*Container)
+	// StartLogging starts the logging driver for the container
+	StartLogging(*Container) error
+	// Run starts a container
+	Run(c *Container) error
+	// IsShuttingDown tells whether the supervisor is shutting down or not
+	IsShuttingDown() bool
+}
+
+// Reset puts a container into a state where it can be restarted again.
+func (container *Container) Reset(lock bool) {
+	if lock {
+		container.Lock()
+		defer container.Unlock()
+	}
+
+	if err := container.CloseStreams(); err != nil {
+		logrus.Errorf("%s: %s", container.ID, err)
+	}
+
+	// Re-create a brand new stdin pipe once the container has exited
+	if container.Config.OpenStdin {
+		container.NewInputPipes()
+	}
+
+	if container.LogDriver != nil {
+		if container.LogCopier != nil {
+			exit := make(chan struct{})
+			go func() {
+				container.LogCopier.Wait()
+				close(exit)
+			}()
+			select {
+			case <-time.After(loggerCloseTimeout):
+				logrus.Warnf("Logger didn't exit in time: logs may be truncated")
+			case <-exit:
+			}
+		}
+		container.LogDriver.Close()
+		container.LogCopier = nil
+		container.LogDriver = nil
+	}
+}
diff --git a/vendor/github.com/docker/docker/container/mounts_unix.go b/vendor/github.com/docker/docker/container/mounts_unix.go
new file mode 100644
index 00000000..c52abed2
--- /dev/null
+++ b/vendor/github.com/docker/docker/container/mounts_unix.go
@@ -0,0 +1,12 @@
+// +build !windows
+
+package container
+
+// Mount contains information for a mount operation.
+type Mount struct {
+	Source      string `json:"source"`
+	Destination string `json:"destination"`
+	Writable    bool   `json:"writable"`
+	Data        string `json:"data"`
+	Propagation string `json:"mountpropagation"`
+}
diff --git a/vendor/github.com/docker/docker/container/state.go b/vendor/github.com/docker/docker/container/state.go
new file mode 100644
index 00000000..a12a193e
--- /dev/null
+++ b/vendor/github.com/docker/docker/container/state.go
@@ -0,0 +1,283 @@
+package container
+
+import (
+	"fmt"
+	"sync"
+	"time"
+
+	"github.com/docker/go-units"
+)
+
+// State holds the current container state, and has methods to get and
+// set the state. Container embeds State, which allows all of the
+// methods defined on State to be called on Container.
+type State struct {
+	sync.Mutex
+	// FIXME: Why do we have both paused and running if a
+	// container cannot be paused and running at the same time?
+	Running           bool
+	Paused            bool
+	Restarting        bool
+	OOMKilled         bool
+	RemovalInProgress bool // No need for this to be persisted on disk.
+	Dead              bool
+	Pid               int
+	ExitCode          int
+	Error             string // contains last known error when starting the container
+	StartedAt         time.Time
+	FinishedAt        time.Time
+	waitChan          chan struct{}
+}
+
+// NewState creates a default state object with a fresh channel for state changes.
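+// Sketch (pid and timeout are illustrative): waiters block on the internal
+// channel until a transition such as SetRunning closes it:
+//
+//	s := NewState()
+//	go func() { s.Lock(); s.SetRunning(1234, true); s.Unlock() }()
+//	pid, err := s.WaitRunning(5 * time.Second)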
+func NewState() *State {
+	return &State{
+		waitChan: make(chan struct{}),
+	}
+}
+
+// String returns a human-readable description of the state
+func (s *State) String() string {
+	if s.Running {
+		if s.Paused {
+			return fmt.Sprintf("Up %s (Paused)", units.HumanDuration(time.Now().UTC().Sub(s.StartedAt)))
+		}
+		if s.Restarting {
+			return fmt.Sprintf("Restarting (%d) %s ago", s.ExitCode, units.HumanDuration(time.Now().UTC().Sub(s.FinishedAt)))
+		}
+
+		return fmt.Sprintf("Up %s", units.HumanDuration(time.Now().UTC().Sub(s.StartedAt)))
+	}
+
+	if s.RemovalInProgress {
+		return "Removal In Progress"
+	}
+
+	if s.Dead {
+		return "Dead"
+	}
+
+	if s.StartedAt.IsZero() {
+		return "Created"
+	}
+
+	if s.FinishedAt.IsZero() {
+		return ""
+	}
+
+	return fmt.Sprintf("Exited (%d) %s ago", s.ExitCode, units.HumanDuration(time.Now().UTC().Sub(s.FinishedAt)))
+}
+
+// StateString returns a single string to describe state
+func (s *State) StateString() string {
+	if s.Running {
+		if s.Paused {
+			return "paused"
+		}
+		if s.Restarting {
+			return "restarting"
+		}
+		return "running"
+	}
+
+	if s.Dead {
+		return "dead"
+	}
+
+	if s.StartedAt.IsZero() {
+		return "created"
+	}
+
+	return "exited"
+}
+
+// IsValidStateString checks if the provided string is a valid container state or not.
+func IsValidStateString(s string) bool {
+	if s != "paused" &&
+		s != "restarting" &&
+		s != "running" &&
+		s != "dead" &&
+		s != "created" &&
+		s != "exited" {
+		return false
+	}
+	return true
+}
+
+func wait(waitChan <-chan struct{}, timeout time.Duration) error {
+	if timeout < 0 {
+		<-waitChan
+		return nil
+	}
+	select {
+	case <-time.After(timeout):
+		return fmt.Errorf("Timed out: %v", timeout)
+	case <-waitChan:
+		return nil
+	}
+}
+
+// WaitRunning waits until the state is running. If the state is already
+// running it returns immediately. If you want to wait forever you must
+// supply a negative timeout. Returns the pid that was passed to
+// SetRunning.
+func (s *State) WaitRunning(timeout time.Duration) (int, error) {
+	s.Lock()
+	if s.Running {
+		pid := s.Pid
+		s.Unlock()
+		return pid, nil
+	}
+	waitChan := s.waitChan
+	s.Unlock()
+	if err := wait(waitChan, timeout); err != nil {
+		return -1, err
+	}
+	return s.GetPID(), nil
+}
+
+// WaitStop waits until the state is stopped. If the state is already stopped
+// it returns immediately. If you want to wait forever you must supply a
+// negative timeout. Returns the exit code that was passed to SetStoppedLocking.
+func (s *State) WaitStop(timeout time.Duration) (int, error) {
+	s.Lock()
+	if !s.Running {
+		exitCode := s.ExitCode
+		s.Unlock()
+		return exitCode, nil
+	}
+	waitChan := s.waitChan
+	s.Unlock()
+	if err := wait(waitChan, timeout); err != nil {
+		return -1, err
+	}
+	return s.getExitCode(), nil
+}
+
+// IsRunning returns whether the running flag is set. Used by Container to check whether a container is running.
+func (s *State) IsRunning() bool {
+	s.Lock()
+	res := s.Running
+	s.Unlock()
+	return res
+}
+
+// GetPID returns the process id of a container.
+func (s *State) GetPID() int {
+	s.Lock()
+	res := s.Pid
+	s.Unlock()
+	return res
+}
+
+func (s *State) getExitCode() int {
+	s.Lock()
+	res := s.ExitCode
+	s.Unlock()
+	return res
+}
+
+// SetRunning sets the state of the container to "running".
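+// Note the pattern shared by the transitions below: closing waitChan releases
+// every waiter, and a fresh channel is installed for the next transition:
+//
+//	close(s.waitChan)
+//	s.waitChan = make(chan struct{})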
+func (s *State) SetRunning(pid int, initial bool) {
+	s.Error = ""
+	s.Running = true
+	s.Paused = false
+	s.Restarting = false
+	s.ExitCode = 0
+	s.Pid = pid
+	if initial {
+		s.StartedAt = time.Now().UTC()
+	}
+	close(s.waitChan) // fire waiters for start
+	s.waitChan = make(chan struct{})
+}
+
+// SetStoppedLocking locks the container state and sets it to "stopped".
+func (s *State) SetStoppedLocking(exitStatus *ExitStatus) {
+	s.Lock()
+	s.SetStopped(exitStatus)
+	s.Unlock()
+}
+
+// SetStopped sets the container state to "stopped" without locking.
+func (s *State) SetStopped(exitStatus *ExitStatus) {
+	s.Running = false
+	s.Paused = false
+	s.Restarting = false
+	s.Pid = 0
+	s.FinishedAt = time.Now().UTC()
+	s.setFromExitStatus(exitStatus)
+	close(s.waitChan) // fire waiters for stop
+	s.waitChan = make(chan struct{})
+}
+
+// SetRestartingLocking locks the state and sets it to "restarting"; used when
+// docker auto-restarts containers that are in the middle of a stop.
+func (s *State) SetRestartingLocking(exitStatus *ExitStatus) {
+	s.Lock()
+	s.SetRestarting(exitStatus)
+	s.Unlock()
+}
+
+// SetRestarting sets the container state to "restarting".
+// It also sets the container PID to 0.
+func (s *State) SetRestarting(exitStatus *ExitStatus) {
+	// we should consider the container running when it is restarting because of
+	// all the checks in docker around rm/stop/etc
+	s.Running = true
+	s.Restarting = true
+	s.Pid = 0
+	s.FinishedAt = time.Now().UTC()
+	s.setFromExitStatus(exitStatus)
+	close(s.waitChan) // fire waiters for stop
+	s.waitChan = make(chan struct{})
+}
+
+// SetError sets the container's error state. This is useful when we want to
+// know the error that occurred when the container transitions to another
+// state, for example when inspecting it.
+func (s *State) SetError(err error) {
+	s.Error = err.Error()
+}
+
+// IsPaused returns whether the container is paused or not.
+func (s *State) IsPaused() bool {
+	s.Lock()
+	res := s.Paused
+	s.Unlock()
+	return res
+}
+
+// IsRestarting returns whether the container is restarting or not.
+func (s *State) IsRestarting() bool {
+	s.Lock()
+	res := s.Restarting
+	s.Unlock()
+	return res
+}
+
+// SetRemovalInProgress sets the container state as being removed.
+// It returns true if the container was already in that state.
+func (s *State) SetRemovalInProgress() bool {
+	s.Lock()
+	defer s.Unlock()
+	if s.RemovalInProgress {
+		return true
+	}
+	s.RemovalInProgress = true
+	return false
+}
+
+// ResetRemovalInProgress sets the RemovalInProgress flag back to false.
+func (s *State) ResetRemovalInProgress() {
+	s.Lock()
+	s.RemovalInProgress = false
+	s.Unlock()
+}
+
+// SetDead sets the container state to "dead"
+func (s *State) SetDead() {
+	s.Lock()
+	s.Dead = true
+	s.Unlock()
+}
diff --git a/vendor/github.com/docker/docker/container/state_unix.go b/vendor/github.com/docker/docker/container/state_unix.go
new file mode 100644
index 00000000..8d25a237
--- /dev/null
+++ b/vendor/github.com/docker/docker/container/state_unix.go
@@ -0,0 +1,10 @@
+// +build linux freebsd
+
+package container
+
+// setFromExitStatus is a platform specific helper function to set the state
+// based on the ExitStatus structure.
+func (s *State) setFromExitStatus(exitStatus *ExitStatus) { + s.ExitCode = exitStatus.ExitCode + s.OOMKilled = exitStatus.OOMKilled +} diff --git a/vendor/github.com/docker/docker/container/store.go b/vendor/github.com/docker/docker/container/store.go new file mode 100644 index 00000000..042fb1a3 --- /dev/null +++ b/vendor/github.com/docker/docker/container/store.go @@ -0,0 +1,28 @@ +package container + +// StoreFilter defines a function to filter +// container in the store. +type StoreFilter func(*Container) bool + +// StoreReducer defines a function to +// manipulate containers in the store +type StoreReducer func(*Container) + +// Store defines an interface that +// any container store must implement. +type Store interface { + // Add appends a new container to the store. + Add(string, *Container) + // Get returns a container from the store by the identifier it was stored with. + Get(string) *Container + // Delete removes a container from the store by the identifier it was stored with. + Delete(string) + // List returns a list of containers from the store. + List() []*Container + // Size returns the number of containers in the store. + Size() int + // First returns the first container found in the store by a given filter. + First(StoreFilter) *Container + // ApplyAll calls the reducer function with every container in the store. + ApplyAll(StoreReducer) +} diff --git a/vendor/github.com/docker/docker/daemon/apparmor_default.go b/vendor/github.com/docker/docker/daemon/apparmor_default.go new file mode 100644 index 00000000..e4065b4a --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/apparmor_default.go @@ -0,0 +1,30 @@ +// +build linux + +package daemon + +import ( + "github.com/Sirupsen/logrus" + aaprofile "github.com/docker/docker/profiles/apparmor" + "github.com/opencontainers/runc/libcontainer/apparmor" +) + +// Define constants for native driver +const ( + defaultApparmorProfile = "docker-default" +) + +func installDefaultAppArmorProfile() { + if apparmor.IsEnabled() { + if err := aaprofile.InstallDefault(defaultApparmorProfile); err != nil { + apparmorProfiles := []string{defaultApparmorProfile} + + // Allow daemon to run if loading failed, but are active + // (possibly through another run, manually, or via system startup) + for _, policy := range apparmorProfiles { + if err := aaprofile.IsLoaded(policy); err != nil { + logrus.Errorf("AppArmor enabled on system but the %s profile could not be loaded.", policy) + } + } + } + } +} diff --git a/vendor/github.com/docker/docker/daemon/apparmor_default_unsupported.go b/vendor/github.com/docker/docker/daemon/apparmor_default_unsupported.go new file mode 100644 index 00000000..f186a68a --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/apparmor_default_unsupported.go @@ -0,0 +1,6 @@ +// +build !linux + +package daemon + +func installDefaultAppArmorProfile() { +} diff --git a/vendor/github.com/docker/docker/daemon/archive.go b/vendor/github.com/docker/docker/daemon/archive.go new file mode 100644 index 00000000..fb023689 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/archive.go @@ -0,0 +1,432 @@ +package daemon + +import ( + "errors" + "io" + "os" + "path/filepath" + "strings" + + "github.com/docker/docker/builder" + "github.com/docker/docker/container" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/engine-api/types" +) + +// ErrExtractPointNotDirectory is used to convey 
that the operation to extract
+// a tar archive to a directory in a container has failed because the specified
+// path does not refer to a directory.
+var ErrExtractPointNotDirectory = errors.New("extraction point is not a directory")
+
+// ErrRootFSReadOnly is returned when a write is attempted against a read-only container rootfs.
+var ErrRootFSReadOnly = errors.New("container rootfs is marked read-only")
+
+// ContainerCopy performs a deprecated operation of archiving the resource at
+// the specified path in the container identified by the given name.
+func (daemon *Daemon) ContainerCopy(name string, res string) (io.ReadCloser, error) {
+	container, err := daemon.GetContainer(name)
+	if err != nil {
+		return nil, err
+	}
+
+	if res[0] == '/' || res[0] == '\\' {
+		res = res[1:]
+	}
+
+	return daemon.containerCopy(container, res)
+}
+
+// ContainerStatPath stats the filesystem resource at the specified path in the
+// container identified by the given name.
+func (daemon *Daemon) ContainerStatPath(name string, path string) (stat *types.ContainerPathStat, err error) {
+	container, err := daemon.GetContainer(name)
+	if err != nil {
+		return nil, err
+	}
+
+	return daemon.containerStatPath(container, path)
+}
+
+// ContainerArchivePath creates an archive of the filesystem resource at the
+// specified path in the container identified by the given name. Returns a
+// tar archive of the resource and whether it was a directory or a single file.
+func (daemon *Daemon) ContainerArchivePath(name string, path string) (content io.ReadCloser, stat *types.ContainerPathStat, err error) {
+	container, err := daemon.GetContainer(name)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	return daemon.containerArchivePath(container, path)
+}
+
+// ContainerExtractToDir extracts the given archive to the specified location
+// in the filesystem of the container identified by the given name. The given
+// path must be of a directory in the container. If it is not, the error will
+// be ErrExtractPointNotDirectory. If noOverwriteDirNonDir is true then it will
+// be an error if unpacking the given content would cause an existing directory
+// to be replaced with a non-directory and vice versa.
+func (daemon *Daemon) ContainerExtractToDir(name, path string, noOverwriteDirNonDir bool, content io.Reader) error {
+	container, err := daemon.GetContainer(name)
+	if err != nil {
+		return err
+	}
+
+	return daemon.containerExtractToDir(container, path, noOverwriteDirNonDir, content)
+}
+
+// containerStatPath stats the filesystem resource at the specified path in this
+// container. Returns stat info about the resource.
+func (daemon *Daemon) containerStatPath(container *container.Container, path string) (stat *types.ContainerPathStat, err error) {
+	container.Lock()
+	defer container.Unlock()
+
+	if err = daemon.Mount(container); err != nil {
+		return nil, err
+	}
+	defer daemon.Unmount(container)
+
+	err = daemon.mountVolumes(container)
+	defer container.UnmountVolumes(true, daemon.LogVolumeEvent)
+	if err != nil {
+		return nil, err
+	}
+
+	resolvedPath, absPath, err := container.ResolvePath(path)
+	if err != nil {
+		return nil, err
+	}
+
+	return container.StatPath(resolvedPath, absPath)
+}
+
+// containerArchivePath creates an archive of the filesystem resource at the specified
+// path in this container. Returns a tar archive of the resource and stat info
+// about the resource.
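+// Usage sketch (path is illustrative): the caller must Close the returned
+// stream; the Close wrapper below is what unmounts the volumes, unmounts the
+// rootfs and releases the container lock:
+//
+//	content, stat, err := daemon.containerArchivePath(c, "/etc")
+//	if err == nil {
+//		defer content.Close()
+//	}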
+func (daemon *Daemon) containerArchivePath(container *container.Container, path string) (content io.ReadCloser, stat *types.ContainerPathStat, err error) { + container.Lock() + + defer func() { + if err != nil { + // Wait to unlock the container until the archive is fully read + // (see the ReadCloseWrapper func below) or if there is an error + // before that occurs. + container.Unlock() + } + }() + + if err = daemon.Mount(container); err != nil { + return nil, nil, err + } + + defer func() { + if err != nil { + // unmount any volumes + container.UnmountVolumes(true, daemon.LogVolumeEvent) + // unmount the container's rootfs + daemon.Unmount(container) + } + }() + + if err = daemon.mountVolumes(container); err != nil { + return nil, nil, err + } + + resolvedPath, absPath, err := container.ResolvePath(path) + if err != nil { + return nil, nil, err + } + + stat, err = container.StatPath(resolvedPath, absPath) + if err != nil { + return nil, nil, err + } + + // We need to rebase the archive entries if the last element of the + // resolved path was a symlink that was evaluated and is now different + // than the requested path. For example, if the given path was "/foo/bar/", + // but it resolved to "/var/lib/docker/containers/{id}/foo/baz/", we want + // to ensure that the archive entries start with "bar" and not "baz". This + // also catches the case when the root directory of the container is + // requested: we want the archive entries to start with "/" and not the + // container ID. + data, err := archive.TarResourceRebase(resolvedPath, filepath.Base(absPath)) + if err != nil { + return nil, nil, err + } + + content = ioutils.NewReadCloserWrapper(data, func() error { + err := data.Close() + container.UnmountVolumes(true, daemon.LogVolumeEvent) + daemon.Unmount(container) + container.Unlock() + return err + }) + + daemon.LogContainerEvent(container, "archive-path") + + return content, stat, nil +} + +// containerExtractToDir extracts the given tar archive to the specified location in the +// filesystem of this container. The given path must be of a directory in the +// container. If it is not, the error will be ErrExtractPointNotDirectory. If +// noOverwriteDirNonDir is true then it will be an error if unpacking the +// given content would cause an existing directory to be replaced with a non- +// directory and vice versa. +func (daemon *Daemon) containerExtractToDir(container *container.Container, path string, noOverwriteDirNonDir bool, content io.Reader) (err error) { + container.Lock() + defer container.Unlock() + + if err = daemon.Mount(container); err != nil { + return err + } + defer daemon.Unmount(container) + + err = daemon.mountVolumes(container) + defer container.UnmountVolumes(true, daemon.LogVolumeEvent) + if err != nil { + return err + } + + // The destination path needs to be resolved to a host path, with all + // symbolic links followed in the scope of the container's rootfs. Note + // that we do not use `container.ResolvePath(path)` here because we need + // to also evaluate the last path element if it is a symlink. This is so + // that you can extract an archive to a symlink that points to a directory. + + // Consider the given path as an absolute path in the container. + absPath := archive.PreserveTrailingDotOrSeparator(filepath.Join(string(filepath.Separator), path), path) + + // This will evaluate the last path element if it is a symlink. 
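+	// e.g. (illustrative): extracting to "/foo/link" where "link" is a symlink
+	// to "/foo/dir" resolves here to the host path of "/foo/dir", so the
+	// archive is unpacked into the directory the link points at.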
+ resolvedPath, err := container.GetResourcePath(absPath) + if err != nil { + return err + } + + stat, err := os.Lstat(resolvedPath) + if err != nil { + return err + } + + if !stat.IsDir() { + return ErrExtractPointNotDirectory + } + + // Need to check if the path is in a volume. If it is, it cannot be in a + // read-only volume. If it is not in a volume, the container cannot be + // configured with a read-only rootfs. + + // Use the resolved path relative to the container rootfs as the new + // absPath. This way we fully follow any symlinks in a volume that may + // lead back outside the volume. + // + // The Windows implementation of filepath.Rel in golang 1.4 does not + // support volume style file path semantics. On Windows when using the + // filter driver, we are guaranteed that the path will always be + // a volume file path. + var baseRel string + if strings.HasPrefix(resolvedPath, `\\?\Volume{`) { + if strings.HasPrefix(resolvedPath, container.BaseFS) { + baseRel = resolvedPath[len(container.BaseFS):] + if baseRel[:1] == `\` { + baseRel = baseRel[1:] + } + } + } else { + baseRel, err = filepath.Rel(container.BaseFS, resolvedPath) + } + if err != nil { + return err + } + // Make it an absolute path. + absPath = filepath.Join(string(filepath.Separator), baseRel) + + toVolume, err := checkIfPathIsInAVolume(container, absPath) + if err != nil { + return err + } + + if !toVolume && container.HostConfig.ReadonlyRootfs { + return ErrRootFSReadOnly + } + + uid, gid := daemon.GetRemappedUIDGID() + options := &archive.TarOptions{ + NoOverwriteDirNonDir: noOverwriteDirNonDir, + ChownOpts: &archive.TarChownOptions{ + UID: uid, GID: gid, // TODO: should all ownership be set to root (either real or remapped)? + }, + } + if err := chrootarchive.Untar(content, resolvedPath, options); err != nil { + return err + } + + daemon.LogContainerEvent(container, "extract-to-dir") + + return nil +} + +func (daemon *Daemon) containerCopy(container *container.Container, resource string) (rc io.ReadCloser, err error) { + container.Lock() + + defer func() { + if err != nil { + // Wait to unlock the container until the archive is fully read + // (see the ReadCloseWrapper func below) or if there is an error + // before that occurs. 
+ container.Unlock() + } + }() + + if err := daemon.Mount(container); err != nil { + return nil, err + } + + defer func() { + if err != nil { + // unmount any volumes + container.UnmountVolumes(true, daemon.LogVolumeEvent) + // unmount the container's rootfs + daemon.Unmount(container) + } + }() + + if err := daemon.mountVolumes(container); err != nil { + return nil, err + } + + basePath, err := container.GetResourcePath(resource) + if err != nil { + return nil, err + } + stat, err := os.Stat(basePath) + if err != nil { + return nil, err + } + var filter []string + if !stat.IsDir() { + d, f := filepath.Split(basePath) + basePath = d + filter = []string{f} + } else { + filter = []string{filepath.Base(basePath)} + basePath = filepath.Dir(basePath) + } + archive, err := archive.TarWithOptions(basePath, &archive.TarOptions{ + Compression: archive.Uncompressed, + IncludeFiles: filter, + }) + if err != nil { + return nil, err + } + + reader := ioutils.NewReadCloserWrapper(archive, func() error { + err := archive.Close() + container.UnmountVolumes(true, daemon.LogVolumeEvent) + daemon.Unmount(container) + container.Unlock() + return err + }) + daemon.LogContainerEvent(container, "copy") + return reader, nil +} + +// CopyOnBuild copies/extracts a source FileInfo to a destination path inside a container +// specified by a container object. +// TODO: make sure callers don't unnecessarily convert destPath with filepath.FromSlash (Copy does it already). +// CopyOnBuild should take in abstract paths (with slashes) and the implementation should convert it to OS-specific paths. +func (daemon *Daemon) CopyOnBuild(cID string, destPath string, src builder.FileInfo, decompress bool) error { + srcPath := src.Path() + destExists := true + destDir := false + rootUID, rootGID := daemon.GetRemappedUIDGID() + + // Work in daemon-local OS specific file paths + destPath = filepath.FromSlash(destPath) + + c, err := daemon.GetContainer(cID) + if err != nil { + return err + } + err = daemon.Mount(c) + if err != nil { + return err + } + defer daemon.Unmount(c) + + dest, err := c.GetResourcePath(destPath) + if err != nil { + return err + } + + // Preserve the trailing slash + // TODO: why are we appending another path separator if there was already one? + if strings.HasSuffix(destPath, string(os.PathSeparator)) || destPath == "." { + destDir = true + dest += string(os.PathSeparator) + } + + destPath = dest + + destStat, err := os.Stat(destPath) + if err != nil { + if !os.IsNotExist(err) { + //logrus.Errorf("Error performing os.Stat on %s. %s", destPath, err) + return err + } + destExists = false + } + + uidMaps, gidMaps := daemon.GetUIDGIDMaps() + archiver := &archive.Archiver{ + Untar: chrootarchive.Untar, + UIDMaps: uidMaps, + GIDMaps: gidMaps, + } + + if src.IsDir() { + // copy as directory + if err := archiver.CopyWithTar(srcPath, destPath); err != nil { + return err + } + return fixPermissions(srcPath, destPath, rootUID, rootGID, destExists) + } + if decompress && archive.IsArchivePath(srcPath) { + // Only try to untar if it is a file and that we've been told to decompress (when ADD-ing a remote file) + + // First try to unpack the source as an archive + // to support the untar feature we need to clean up the path a little bit + // because tar is very forgiving. 
First we need to strip off the archive's + // filename from the path but this is only added if it does not end in slash + tarDest := destPath + if strings.HasSuffix(tarDest, string(os.PathSeparator)) { + tarDest = filepath.Dir(destPath) + } + + // try to successfully untar the orig + err := archiver.UntarPath(srcPath, tarDest) + /* + if err != nil { + logrus.Errorf("Couldn't untar to %s: %v", tarDest, err) + } + */ + return err + } + + // only needed for fixPermissions, but might as well put it before CopyFileWithTar + if destDir || (destExists && destStat.IsDir()) { + destPath = filepath.Join(destPath, src.Name()) + } + + if err := idtools.MkdirAllNewAs(filepath.Dir(destPath), 0755, rootUID, rootGID); err != nil { + return err + } + if err := archiver.CopyFileWithTar(srcPath, destPath); err != nil { + return err + } + + return fixPermissions(srcPath, destPath, rootUID, rootGID, destExists) +} diff --git a/vendor/github.com/docker/docker/daemon/archive_unix.go b/vendor/github.com/docker/docker/daemon/archive_unix.go new file mode 100644 index 00000000..fcea13c9 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/archive_unix.go @@ -0,0 +1,57 @@ +// +build !windows + +package daemon + +import ( + "github.com/docker/docker/container" + "os" + "path/filepath" +) + +// checkIfPathIsInAVolume checks if the path is in a volume. If it is, it +// cannot be in a read-only volume. If it is not in a volume, the container +// cannot be configured with a read-only rootfs. +func checkIfPathIsInAVolume(container *container.Container, absPath string) (bool, error) { + var toVolume bool + for _, mnt := range container.MountPoints { + if toVolume = mnt.HasResource(absPath); toVolume { + if mnt.RW { + break + } + return false, ErrVolumeReadonly + } + } + return toVolume, nil +} + +func fixPermissions(source, destination string, uid, gid int, destExisted bool) error { + // If the destination didn't already exist, or the destination isn't a + // directory, then we should Lchown the destination. Otherwise, we shouldn't + // Lchown the destination. + destStat, err := os.Stat(destination) + if err != nil { + // This should *never* be reached, because the destination must've already + // been created while untar-ing the context. + return err + } + doChownDestination := !destExisted || !destStat.IsDir() + + // We Walk on the source rather than on the destination because we don't + // want to change permissions on things we haven't created or modified. + return filepath.Walk(source, func(fullpath string, info os.FileInfo, err error) error { + // Do not alter the walk root iff. it existed before, as it doesn't fall under + // the domain of "things we should chown". + if !doChownDestination && (source == fullpath) { + return nil + } + + // Path is prefixed by source: substitute with destination instead. 
+ cleaned, err := filepath.Rel(source, fullpath) + if err != nil { + return err + } + + fullpath = filepath.Join(destination, cleaned) + return os.Lchown(fullpath, uid, gid) + }) +} diff --git a/vendor/github.com/docker/docker/daemon/attach.go b/vendor/github.com/docker/docker/daemon/attach.go new file mode 100644 index 00000000..79e9cd51 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/attach.go @@ -0,0 +1,120 @@ +package daemon + +import ( + "fmt" + "io" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/errors" + "github.com/docker/docker/pkg/stdcopy" +) + +// ContainerAttach attaches to logs according to the config passed in. See ContainerAttachConfig. +func (daemon *Daemon) ContainerAttach(prefixOrName string, c *backend.ContainerAttachConfig) error { + container, err := daemon.GetContainer(prefixOrName) + if err != nil { + return err + } + if container.IsPaused() { + err := fmt.Errorf("Container %s is paused. Unpause the container before attach", prefixOrName) + return errors.NewRequestConflictError(err) + } + + inStream, outStream, errStream, err := c.GetStreams() + if err != nil { + return err + } + defer inStream.Close() + + if !container.Config.Tty && c.MuxStreams { + errStream = stdcopy.NewStdWriter(errStream, stdcopy.Stderr) + outStream = stdcopy.NewStdWriter(outStream, stdcopy.Stdout) + } + + var stdin io.ReadCloser + var stdout, stderr io.Writer + + if c.UseStdin { + stdin = inStream + } + if c.UseStdout { + stdout = outStream + } + if c.UseStderr { + stderr = errStream + } + + if err := daemon.containerAttach(container, stdin, stdout, stderr, c.Logs, c.Stream, c.DetachKeys); err != nil { + fmt.Fprintf(outStream, "Error attaching: %s\n", err) + } + return nil +} + +// ContainerAttachRaw attaches the provided streams to the container's stdio +func (daemon *Daemon) ContainerAttachRaw(prefixOrName string, stdin io.ReadCloser, stdout, stderr io.Writer, stream bool) error { + container, err := daemon.GetContainer(prefixOrName) + if err != nil { + return err + } + return daemon.containerAttach(container, stdin, stdout, stderr, false, stream, nil) +} + +func (daemon *Daemon) containerAttach(container *container.Container, stdin io.ReadCloser, stdout, stderr io.Writer, logs, stream bool, keys []byte) error { + if logs { + logDriver, err := daemon.getLogger(container) + if err != nil { + return err + } + cLog, ok := logDriver.(logger.LogReader) + if !ok { + return logger.ErrReadLogsNotSupported + } + logs := cLog.ReadLogs(logger.ReadConfig{Tail: -1}) + + LogLoop: + for { + select { + case msg, ok := <-logs.Msg: + if !ok { + break LogLoop + } + if msg.Source == "stdout" && stdout != nil { + stdout.Write(msg.Line) + } + if msg.Source == "stderr" && stderr != nil { + stderr.Write(msg.Line) + } + case err := <-logs.Err: + logrus.Errorf("Error streaming logs: %v", err) + break LogLoop + } + } + } + + daemon.LogContainerEvent(container, "attach") + + //stream + if stream { + var stdinPipe io.ReadCloser + if stdin != nil { + r, w := io.Pipe() + go func() { + defer w.Close() + defer logrus.Debugf("Closing buffered stdin pipe") + io.Copy(w, stdin) + }() + stdinPipe = r + } + <-container.Attach(stdinPipe, stdout, stderr, keys) + // If we are in stdinonce mode, wait for the process to end + // otherwise, simply return + if container.Config.StdinOnce && !container.Config.Tty { + container.WaitStop(-1 * time.Second) + } + } + return nil +} 
diff --git a/vendor/github.com/docker/docker/daemon/caps/utils_unix.go b/vendor/github.com/docker/docker/daemon/caps/utils_unix.go
new file mode 100644
index 00000000..c99485f5
--- /dev/null
+++ b/vendor/github.com/docker/docker/daemon/caps/utils_unix.go
@@ -0,0 +1,131 @@
+// +build !windows
+
+package caps
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/docker/docker/pkg/stringutils"
+	"github.com/syndtr/gocapability/capability"
+)
+
+var capabilityList Capabilities
+
+func init() {
+	last := capability.CAP_LAST_CAP
+	// hack for RHEL6 which has no /proc/sys/kernel/cap_last_cap
+	if last == capability.Cap(63) {
+		last = capability.CAP_BLOCK_SUSPEND
+	}
+	for _, cap := range capability.List() {
+		if cap > last {
+			continue
+		}
+		capabilityList = append(capabilityList,
+			&CapabilityMapping{
+				Key:   "CAP_" + strings.ToUpper(cap.String()),
+				Value: cap,
+			},
+		)
+	}
+}
+
+type (
+	// CapabilityMapping maps a Linux capability name to its value of capability.Cap type.
+	// Capabilities is one of the security systems in Linux Security Module (LSM)
+	// framework provided by the kernel.
+	// For more details on capabilities, see http://man7.org/linux/man-pages/man7/capabilities.7.html
+	CapabilityMapping struct {
+		Key   string         `json:"key,omitempty"`
+		Value capability.Cap `json:"value,omitempty"`
+	}
+	// Capabilities contains all CapabilityMappings
+	Capabilities []*CapabilityMapping
+)
+
+// String returns the key of the CapabilityMapping
+func (c *CapabilityMapping) String() string {
+	return c.Key
+}
+
+// GetCapability returns a copy of the CapabilityMapping that matches the given key
+func GetCapability(key string) *CapabilityMapping {
+	for _, capp := range capabilityList {
+		if capp.Key == key {
+			cpy := *capp
+			return &cpy
+		}
+	}
+	return nil
+}
+
+// GetAllCapabilities returns all of the capabilities
+func GetAllCapabilities() []string {
+	output := make([]string, len(capabilityList))
+	for i, capability := range capabilityList {
+		output[i] = capability.String()
+	}
+	return output
+}
+
+// TweakCapabilities tweaks capabilities by adding or dropping capabilities
+// relative to the basic (default) capabilities.
+func TweakCapabilities(basics, adds, drops []string) ([]string, error) {
+	var (
+		newCaps []string
+		allCaps = GetAllCapabilities()
+	)
+
+	// FIXME(tonistiigi): docker format is without CAP_ prefix, oci is with prefix
+	// Currently they are mixed in here. We should do conversion in one place.
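+	// (e.g. the engine API carries "NET_ADMIN", while allCaps and the OCI
+	// spec use the prefixed form "CAP_NET_ADMIN"; hence the "CAP_"+cap and
+	// cap[4:] conversions below)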
+
+	// look for invalid cap in the drop list
+	for _, cap := range drops {
+		if strings.ToLower(cap) == "all" {
+			continue
+		}
+
+		if !stringutils.InSlice(allCaps, "CAP_"+cap) {
+			return nil, fmt.Errorf("Unknown capability drop: %q", cap)
+		}
+	}
+
+	// handle --cap-add=all
+	if stringutils.InSlice(adds, "all") {
+		basics = allCaps
+	}
+
+	if !stringutils.InSlice(drops, "all") {
+		for _, cap := range basics {
+			// skip `all` already handled above
+			if strings.ToLower(cap) == "all" {
+				continue
+			}
+
+			// if we don't drop `all`, add back all the non-dropped caps
+			if !stringutils.InSlice(drops, cap[4:]) {
+				newCaps = append(newCaps, strings.ToUpper(cap))
+			}
+		}
+	}
+
+	for _, cap := range adds {
+		// skip `all` already handled above
+		if strings.ToLower(cap) == "all" {
+			continue
+		}
+
+		cap = "CAP_" + cap
+
+		if !stringutils.InSlice(allCaps, cap) {
+			return nil, fmt.Errorf("Unknown capability to add: %q", cap)
+		}
+
+		// add cap if not already in the list
+		if !stringutils.InSlice(newCaps, cap) {
+			newCaps = append(newCaps, strings.ToUpper(cap))
+		}
+	}
+	return newCaps, nil
+}
diff --git a/vendor/github.com/docker/docker/daemon/changes.go b/vendor/github.com/docker/docker/daemon/changes.go
new file mode 100644
index 00000000..5bc5b9d5
--- /dev/null
+++ b/vendor/github.com/docker/docker/daemon/changes.go
@@ -0,0 +1,15 @@
+package daemon
+
+import "github.com/docker/docker/pkg/archive"
+
+// ContainerChanges returns a list of container fs changes
+func (daemon *Daemon) ContainerChanges(name string) ([]archive.Change, error) {
+	container, err := daemon.GetContainer(name)
+	if err != nil {
+		return nil, err
+	}
+
+	container.Lock()
+	defer container.Unlock()
+	return daemon.changes(container)
+}
diff --git a/vendor/github.com/docker/docker/daemon/commit.go b/vendor/github.com/docker/docker/daemon/commit.go
new file mode 100644
index 00000000..7cdf80c7
--- /dev/null
+++ b/vendor/github.com/docker/docker/daemon/commit.go
@@ -0,0 +1,233 @@
+package daemon
+
+import (
+	"encoding/json"
+	"fmt"
+	"runtime"
+	"strings"
+	"time"
+
+	"github.com/docker/docker/container"
+	"github.com/docker/docker/dockerversion"
+	"github.com/docker/docker/image"
+	"github.com/docker/docker/layer"
+	"github.com/docker/docker/pkg/archive"
+	"github.com/docker/docker/pkg/ioutils"
+	"github.com/docker/docker/reference"
+	"github.com/docker/engine-api/types"
+	containertypes "github.com/docker/engine-api/types/container"
+	"github.com/docker/go-connections/nat"
+)
+
+// merge merges two Configs: the image container configuration (default values)
+// and the user container configuration, either passed by the API or generated
+// by the CLI.
+// It will mutate the specified user configuration (userConf) with the image
+// configuration where the user configuration is incomplete.
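+// For example (illustrative values): if imageConf exposes 80/tcp and userConf
+// already exposes 8080/tcp, the merged userConf exposes both; a WorkingDir set
+// in userConf is never overwritten by the image's.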
+func merge(userConf, imageConf *containertypes.Config) error { + if userConf.User == "" { + userConf.User = imageConf.User + } + if len(userConf.ExposedPorts) == 0 { + userConf.ExposedPorts = imageConf.ExposedPorts + } else if imageConf.ExposedPorts != nil { + if userConf.ExposedPorts == nil { + userConf.ExposedPorts = make(nat.PortSet) + } + for port := range imageConf.ExposedPorts { + if _, exists := userConf.ExposedPorts[port]; !exists { + userConf.ExposedPorts[port] = struct{}{} + } + } + } + + if len(userConf.Env) == 0 { + userConf.Env = imageConf.Env + } else { + for _, imageEnv := range imageConf.Env { + found := false + imageEnvKey := strings.Split(imageEnv, "=")[0] + for _, userEnv := range userConf.Env { + userEnvKey := strings.Split(userEnv, "=")[0] + if imageEnvKey == userEnvKey { + found = true + break + } + } + if !found { + userConf.Env = append(userConf.Env, imageEnv) + } + } + } + + if userConf.Labels == nil { + userConf.Labels = map[string]string{} + } + if imageConf.Labels != nil { + for l := range userConf.Labels { + imageConf.Labels[l] = userConf.Labels[l] + } + userConf.Labels = imageConf.Labels + } + + if len(userConf.Entrypoint) == 0 { + if len(userConf.Cmd) == 0 { + userConf.Cmd = imageConf.Cmd + } + + if userConf.Entrypoint == nil { + userConf.Entrypoint = imageConf.Entrypoint + } + } + if userConf.WorkingDir == "" { + userConf.WorkingDir = imageConf.WorkingDir + } + if len(userConf.Volumes) == 0 { + userConf.Volumes = imageConf.Volumes + } else { + for k, v := range imageConf.Volumes { + userConf.Volumes[k] = v + } + } + + if userConf.StopSignal == "" { + userConf.StopSignal = imageConf.StopSignal + } + return nil +} + +// Commit creates a new filesystem image from the current state of a container. +// The image can optionally be tagged into a repository. 
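+// When c.Pause is set and the container is running, the container is paused
+// for the duration of the commit and unpaused afterwards.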
+func (daemon *Daemon) Commit(name string, c *types.ContainerCommitConfig) (string, error) { + container, err := daemon.GetContainer(name) + if err != nil { + return "", err + } + + // It is not possible to commit a running container on Windows + if runtime.GOOS == "windows" && container.IsRunning() { + return "", fmt.Errorf("Windows does not support commit of a running container") + } + + if c.Pause && !container.IsPaused() { + daemon.containerPause(container) + defer daemon.containerUnpause(container) + } + + if c.MergeConfigs { + if err := merge(c.Config, container.Config); err != nil { + return "", err + } + } + + rwTar, err := daemon.exportContainerRw(container) + if err != nil { + return "", err + } + defer func() { + if rwTar != nil { + rwTar.Close() + } + }() + + var history []image.History + rootFS := image.NewRootFS() + + if container.ImageID != "" { + img, err := daemon.imageStore.Get(container.ImageID) + if err != nil { + return "", err + } + history = img.History + rootFS = img.RootFS + } + + l, err := daemon.layerStore.Register(rwTar, rootFS.ChainID()) + if err != nil { + return "", err + } + defer layer.ReleaseAndLog(daemon.layerStore, l) + + h := image.History{ + Author: c.Author, + Created: time.Now().UTC(), + CreatedBy: strings.Join(container.Config.Cmd, " "), + Comment: c.Comment, + EmptyLayer: true, + } + + if diffID := l.DiffID(); layer.DigestSHA256EmptyTar != diffID { + h.EmptyLayer = false + rootFS.Append(diffID) + } + + history = append(history, h) + + config, err := json.Marshal(&image.Image{ + V1Image: image.V1Image{ + DockerVersion: dockerversion.Version, + Config: c.Config, + Architecture: runtime.GOARCH, + OS: runtime.GOOS, + Container: container.ID, + ContainerConfig: *container.Config, + Author: c.Author, + Created: h.Created, + }, + RootFS: rootFS, + History: history, + }) + + if err != nil { + return "", err + } + + id, err := daemon.imageStore.Create(config) + if err != nil { + return "", err + } + + if container.ImageID != "" { + if err := daemon.imageStore.SetParent(id, container.ImageID); err != nil { + return "", err + } + } + + if c.Repo != "" { + newTag, err := reference.WithName(c.Repo) // todo: should move this to API layer + if err != nil { + return "", err + } + if c.Tag != "" { + if newTag, err = reference.WithTag(newTag, c.Tag); err != nil { + return "", err + } + } + if err := daemon.TagImage(newTag, id.String()); err != nil { + return "", err + } + } + + attributes := map[string]string{ + "comment": c.Comment, + } + daemon.LogContainerEventWithAttributes(container, "commit", attributes) + return id.String(), nil +} + +func (daemon *Daemon) exportContainerRw(container *container.Container) (archive.Archive, error) { + if err := daemon.Mount(container); err != nil { + return nil, err + } + + archive, err := container.RWLayer.TarStream() + if err != nil { + daemon.Unmount(container) // logging is already handled in the `Unmount` function + return nil, err + } + return ioutils.NewReadCloserWrapper(archive, func() error { + archive.Close() + return container.RWLayer.Unmount() + }), + nil +} diff --git a/vendor/github.com/docker/docker/daemon/config.go b/vendor/github.com/docker/docker/daemon/config.go new file mode 100644 index 00000000..c293935c --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/config.go @@ -0,0 +1,358 @@ +package daemon + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "strings" + "sync" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/opts" + flag "github.com/docker/docker/pkg/mflag" + 
"github.com/docker/docker/registry" + "github.com/imdario/mergo" +) + +const ( + defaultNetworkMtu = 1500 + disableNetworkBridge = "none" +) + +// flatOptions contains configuration keys +// that MUST NOT be parsed as deep structures. +// Use this to differentiate these options +// with others like the ones in CommonTLSOptions. +var flatOptions = map[string]bool{ + "cluster-store-opts": true, + "log-opts": true, +} + +// LogConfig represents the default log configuration. +// It includes json tags to deserialize configuration from a file +// using the same names that the flags in the command line uses. +type LogConfig struct { + Type string `json:"log-driver,omitempty"` + Config map[string]string `json:"log-opts,omitempty"` +} + +// CommonTLSOptions defines TLS configuration for the daemon server. +// It includes json tags to deserialize configuration from a file +// using the same names that the flags in the command line uses. +type CommonTLSOptions struct { + CAFile string `json:"tlscacert,omitempty"` + CertFile string `json:"tlscert,omitempty"` + KeyFile string `json:"tlskey,omitempty"` +} + +// CommonConfig defines the configuration of a docker daemon which are +// common across platforms. +// It includes json tags to deserialize configuration from a file +// using the same names that the flags in the command line uses. +type CommonConfig struct { + AuthorizationPlugins []string `json:"authorization-plugins,omitempty"` // AuthorizationPlugins holds list of authorization plugins + AutoRestart bool `json:"-"` + Context map[string][]string `json:"-"` + DisableBridge bool `json:"-"` + DNS []string `json:"dns,omitempty"` + DNSOptions []string `json:"dns-opts,omitempty"` + DNSSearch []string `json:"dns-search,omitempty"` + ExecOptions []string `json:"exec-opts,omitempty"` + ExecRoot string `json:"exec-root,omitempty"` + GraphDriver string `json:"storage-driver,omitempty"` + GraphOptions []string `json:"storage-opts,omitempty"` + Labels []string `json:"labels,omitempty"` + Mtu int `json:"mtu,omitempty"` + Pidfile string `json:"pidfile,omitempty"` + RawLogs bool `json:"raw-logs,omitempty"` + Root string `json:"graph,omitempty"` + SocketGroup string `json:"group,omitempty"` + TrustKeyPath string `json:"-"` + + // ClusterStore is the storage backend used for the cluster information. It is used by both + // multihost networking (to store networks and endpoints information) and by the node discovery + // mechanism. + ClusterStore string `json:"cluster-store,omitempty"` + + // ClusterOpts is used to pass options to the discovery package for tuning libkv settings, such + // as TLS configuration settings. + ClusterOpts map[string]string `json:"cluster-store-opts,omitempty"` + + // ClusterAdvertise is the network endpoint that the Engine advertises for the purpose of node + // discovery. This should be a 'host:port' combination on which that daemon instance is + // reachable by other hosts. + ClusterAdvertise string `json:"cluster-advertise,omitempty"` + + Debug bool `json:"debug,omitempty"` + Hosts []string `json:"hosts,omitempty"` + LogLevel string `json:"log-level,omitempty"` + TLS bool `json:"tls,omitempty"` + TLSVerify bool `json:"tlsverify,omitempty"` + + // Embedded structs that allow config + // deserialization without the full struct. + CommonTLSOptions + LogConfig + bridgeConfig // bridgeConfig holds bridge network specific configuration. 
+ registry.ServiceOptions + + reloadLock sync.Mutex + valuesSet map[string]interface{} +} + +// InstallCommonFlags adds command-line options to the top-level flag parser for +// the current process. +// Subsequent calls to `flag.Parse` will populate config with values parsed +// from the command-line. +func (config *Config) InstallCommonFlags(cmd *flag.FlagSet, usageFn func(string) string) { + config.ServiceOptions.InstallCliFlags(cmd, usageFn) + + cmd.Var(opts.NewNamedListOptsRef("storage-opts", &config.GraphOptions, nil), []string{"-storage-opt"}, usageFn("Set storage driver options")) + cmd.Var(opts.NewNamedListOptsRef("authorization-plugins", &config.AuthorizationPlugins, nil), []string{"-authorization-plugin"}, usageFn("List authorization plugins in order from first evaluator to last")) + cmd.Var(opts.NewNamedListOptsRef("exec-opts", &config.ExecOptions, nil), []string{"-exec-opt"}, usageFn("Set runtime execution options")) + cmd.StringVar(&config.Pidfile, []string{"p", "-pidfile"}, defaultPidFile, usageFn("Path to use for daemon PID file")) + cmd.StringVar(&config.Root, []string{"g", "-graph"}, defaultGraph, usageFn("Root of the Docker runtime")) + cmd.StringVar(&config.ExecRoot, []string{"-exec-root"}, defaultExecRoot, usageFn("Root directory for execution state files")) + cmd.BoolVar(&config.AutoRestart, []string{"#r", "#-restart"}, true, usageFn("--restart on the daemon has been deprecated in favor of --restart policies on docker run")) + cmd.StringVar(&config.GraphDriver, []string{"s", "-storage-driver"}, "", usageFn("Storage driver to use")) + cmd.IntVar(&config.Mtu, []string{"#mtu", "-mtu"}, 0, usageFn("Set the containers network MTU")) + cmd.BoolVar(&config.RawLogs, []string{"-raw-logs"}, false, usageFn("Full timestamps without ANSI coloring")) + // FIXME: why the inconsistency between "hosts" and "sockets"? + cmd.Var(opts.NewListOptsRef(&config.DNS, opts.ValidateIPAddress), []string{"#dns", "-dns"}, usageFn("DNS server to use")) + cmd.Var(opts.NewNamedListOptsRef("dns-opts", &config.DNSOptions, nil), []string{"-dns-opt"}, usageFn("DNS options to use")) + cmd.Var(opts.NewListOptsRef(&config.DNSSearch, opts.ValidateDNSSearch), []string{"-dns-search"}, usageFn("DNS search domains to use")) + cmd.Var(opts.NewNamedListOptsRef("labels", &config.Labels, opts.ValidateLabel), []string{"-label"}, usageFn("Set key=value labels to the daemon")) + cmd.StringVar(&config.LogConfig.Type, []string{"-log-driver"}, "json-file", usageFn("Default driver for container logs")) + cmd.Var(opts.NewNamedMapOpts("log-opts", config.LogConfig.Config, nil), []string{"-log-opt"}, usageFn("Set log driver options")) + cmd.StringVar(&config.ClusterAdvertise, []string{"-cluster-advertise"}, "", usageFn("Address or interface name to advertise")) + cmd.StringVar(&config.ClusterStore, []string{"-cluster-store"}, "", usageFn("Set the cluster store")) + cmd.Var(opts.NewNamedMapOpts("cluster-store-opts", config.ClusterOpts, nil), []string{"-cluster-store-opt"}, usageFn("Set cluster store options")) +} + +// IsValueSet returns true if a configuration value +// was explicitly set in the configuration file. +func (config *Config) IsValueSet(name string) bool { + if config.valuesSet == nil { + return false + } + _, ok := config.valuesSet[name] + return ok +} + +// ReloadConfiguration reads the configuration in the host and reloads the daemon and server. 
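+// In the daemon this is typically wired to a signal handler (SIGHUP), with the
+// reload callback applying the new, validated Config.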
+func ReloadConfiguration(configFile string, flags *flag.FlagSet, reload func(*Config)) error {
+	logrus.Infof("Got signal to reload configuration, reloading from: %s", configFile)
+	newConfig, err := getConflictFreeConfiguration(configFile, flags)
+	if err != nil {
+		return err
+	}
+
+	if err := validateConfiguration(newConfig); err != nil {
+		return fmt.Errorf("file configuration validation failed (%v)", err)
+	}
+
+	reload(newConfig)
+	return nil
+}
+
+// boolValue is an interface that boolean value flags implement
+// to tell the command line how to make -name equivalent to -name=true.
+type boolValue interface {
+	IsBoolFlag() bool
+}
+
+// MergeDaemonConfigurations reads a configuration file,
+// loads the file configuration in an isolated structure,
+// and merges the configuration provided from flags on top
+// if there are no conflicts.
+func MergeDaemonConfigurations(flagsConfig *Config, flags *flag.FlagSet, configFile string) (*Config, error) {
+	fileConfig, err := getConflictFreeConfiguration(configFile, flags)
+	if err != nil {
+		return nil, err
+	}
+
+	if err := validateConfiguration(fileConfig); err != nil {
+		return nil, fmt.Errorf("file configuration validation failed (%v)", err)
+	}
+
+	// merge flags configuration on top of the file configuration
+	if err := mergo.Merge(fileConfig, flagsConfig); err != nil {
+		return nil, err
+	}
+
+	return fileConfig, nil
+}
+
+// getConflictFreeConfiguration loads the configuration from a JSON file.
+// It compares that configuration with the one provided by the flags,
+// and returns an error if there are conflicts.
+func getConflictFreeConfiguration(configFile string, flags *flag.FlagSet) (*Config, error) {
+	b, err := ioutil.ReadFile(configFile)
+	if err != nil {
+		return nil, err
+	}
+
+	var config Config
+	var reader io.Reader
+	if flags != nil {
+		var jsonConfig map[string]interface{}
+		reader = bytes.NewReader(b)
+		if err := json.NewDecoder(reader).Decode(&jsonConfig); err != nil {
+			return nil, err
+		}
+
+		configSet := configValuesSet(jsonConfig)
+
+		if err := findConfigurationConflicts(configSet, flags); err != nil {
+			return nil, err
+		}
+
+		// Override flag values to make sure the values set in the config file with nullable values, like `false`,
+		// are not overridden by default truthy values from the flags that were not explicitly set.
+		// See https://github.com/docker/docker/issues/20289 for an example.
+		//
+		// TODO: Rewrite configuration logic to avoid same issue with other nullable values, like numbers.
+		namedOptions := make(map[string]interface{})
+		for key, value := range configSet {
+			f := flags.Lookup("-" + key)
+			if f == nil { // ignore named flags that don't match
+				namedOptions[key] = value
+				continue
+			}
+
+			if _, ok := f.Value.(boolValue); ok {
+				f.Value.Set(fmt.Sprintf("%v", value))
+			}
+		}
+		if len(namedOptions) > 0 {
+			// also set the default for mergeVal flags that are boolValue at the same time.
+			flags.VisitAll(func(f *flag.Flag) {
+				if opt, named := f.Value.(opts.NamedOption); named {
+					v, set := namedOptions[opt.Name()]
+					_, boolean := f.Value.(boolValue)
+					if set && boolean {
+						f.Value.Set(fmt.Sprintf("%v", v))
+					}
+				}
+			})
+		}
+
+		config.valuesSet = configSet
+	}
+
+	reader = bytes.NewReader(b)
+	err = json.NewDecoder(reader).Decode(&config)
+	return &config, err
+}
+
+// configValuesSet returns the configuration values explicitly set in the file.
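+// For example (illustrative): a nested map under "log-opts" is kept whole
+// because that key is in flatOptions, while a nested map under any other key
+// would have its inner keys promoted to the top level.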
+func configValuesSet(config map[string]interface{}) map[string]interface{} { + flatten := make(map[string]interface{}) + for k, v := range config { + if m, isMap := v.(map[string]interface{}); isMap && !flatOptions[k] { + for km, vm := range m { + flatten[km] = vm + } + continue + } + + flatten[k] = v + } + return flatten +} + +// findConfigurationConflicts iterates over the provided flags searching for +// duplicated configurations and unknown keys. It returns an error with all the conflicts if +// it finds any. +func findConfigurationConflicts(config map[string]interface{}, flags *flag.FlagSet) error { + // 1. Search keys from the file that we don't recognize as flags. + unknownKeys := make(map[string]interface{}) + for key, value := range config { + flagName := "-" + key + if flag := flags.Lookup(flagName); flag == nil { + unknownKeys[key] = value + } + } + + // 2. Discard values that implement NamedOption. + // Their configuration name differs from their flag name, like `labels` and `label`. + if len(unknownKeys) > 0 { + unknownNamedConflicts := func(f *flag.Flag) { + if namedOption, ok := f.Value.(opts.NamedOption); ok { + if _, valid := unknownKeys[namedOption.Name()]; valid { + delete(unknownKeys, namedOption.Name()) + } + } + } + flags.VisitAll(unknownNamedConflicts) + } + + if len(unknownKeys) > 0 { + var unknown []string + for key := range unknownKeys { + unknown = append(unknown, key) + } + return fmt.Errorf("the following directives don't match any configuration option: %s", strings.Join(unknown, ", ")) + } + + var conflicts []string + printConflict := func(name string, flagValue, fileValue interface{}) string { + return fmt.Sprintf("%s: (from flag: %v, from file: %v)", name, flagValue, fileValue) + } + + // 3. Search keys that are present as a flag and as a file option. + duplicatedConflicts := func(f *flag.Flag) { + // search option name in the json configuration payload if the value is a named option + if namedOption, ok := f.Value.(opts.NamedOption); ok { + if optsValue, ok := config[namedOption.Name()]; ok { + conflicts = append(conflicts, printConflict(namedOption.Name(), f.Value.String(), optsValue)) + } + } else { + // search flag name in the json configuration payload without trailing dashes + for _, name := range f.Names { + name = strings.TrimLeft(name, "-") + + if value, ok := config[name]; ok { + conflicts = append(conflicts, printConflict(name, f.Value.String(), value)) + break + } + } + } + } + + flags.Visit(duplicatedConflicts) + + if len(conflicts) > 0 { + return fmt.Errorf("the following directives are specified both as a flag and in the configuration file: %s", strings.Join(conflicts, ", ")) + } + return nil +} + +// validateConfiguration validates some specific configs. 
+// such as config.DNS, config.Labels, config.DNSSearch +func validateConfiguration(config *Config) error { + // validate DNS + for _, dns := range config.DNS { + if _, err := opts.ValidateIPAddress(dns); err != nil { + return err + } + } + + // validate DNSSearch + for _, dnsSearch := range config.DNSSearch { + if _, err := opts.ValidateDNSSearch(dnsSearch); err != nil { + return err + } + } + + // validate Labels + for _, label := range config.Labels { + if _, err := opts.ValidateLabel(label); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/config_experimental.go b/vendor/github.com/docker/docker/daemon/config_experimental.go new file mode 100644 index 00000000..ceb7c382 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/config_experimental.go @@ -0,0 +1,8 @@ +// +build experimental + +package daemon + +import flag "github.com/docker/docker/pkg/mflag" + +func (config *Config) attachExperimentalFlags(cmd *flag.FlagSet, usageFn func(string) string) { +} diff --git a/vendor/github.com/docker/docker/daemon/config_stub.go b/vendor/github.com/docker/docker/daemon/config_stub.go new file mode 100644 index 00000000..796e6b6e --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/config_stub.go @@ -0,0 +1,8 @@ +// +build !experimental + +package daemon + +import flag "github.com/docker/docker/pkg/mflag" + +func (config *Config) attachExperimentalFlags(cmd *flag.FlagSet, usageFn func(string) string) { +} diff --git a/vendor/github.com/docker/docker/daemon/config_unix.go b/vendor/github.com/docker/docker/daemon/config_unix.go new file mode 100644 index 00000000..5394949e --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/config_unix.go @@ -0,0 +1,88 @@ +// +build linux freebsd + +package daemon + +import ( + "net" + + "github.com/docker/docker/opts" + flag "github.com/docker/docker/pkg/mflag" + runconfigopts "github.com/docker/docker/runconfig/opts" + "github.com/docker/go-units" +) + +var ( + defaultPidFile = "/var/run/docker.pid" + defaultGraph = "/var/lib/docker" + defaultExecRoot = "/var/run/docker" +) + +// Config defines the configuration of a docker daemon. +// It includes json tags to deserialize configuration from a file +// using the same names that the flags in the command line uses. +type Config struct { + CommonConfig + + // Fields below here are platform specific. + + CorsHeaders string `json:"api-cors-headers,omitempty"` + EnableCors bool `json:"api-enable-cors,omitempty"` + EnableSelinuxSupport bool `json:"selinux-enabled,omitempty"` + RemappedRoot string `json:"userns-remap,omitempty"` + CgroupParent string `json:"cgroup-parent,omitempty"` + Ulimits map[string]*units.Ulimit `json:"default-ulimits,omitempty"` + ContainerdAddr string `json:"containerd,omitempty"` +} + +// bridgeConfig stores all the bridge driver specific +// configuration. 
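+// A daemon.json fragment such as {"iptables": false, "bip": "172.26.0.1/16"}
+// (illustrative values) deserializes into EnableIPTables and IP below.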
+type bridgeConfig struct { + EnableIPv6 bool `json:"ipv6,omitempty"` + EnableIPTables bool `json:"iptables,omitempty"` + EnableIPForward bool `json:"ip-forward,omitempty"` + EnableIPMasq bool `json:"ip-mask,omitempty"` + EnableUserlandProxy bool `json:"userland-proxy,omitempty"` + DefaultIP net.IP `json:"ip,omitempty"` + Iface string `json:"bridge,omitempty"` + IP string `json:"bip,omitempty"` + FixedCIDR string `json:"fixed-cidr,omitempty"` + FixedCIDRv6 string `json:"fixed-cidr-v6,omitempty"` + DefaultGatewayIPv4 net.IP `json:"default-gateway,omitempty"` + DefaultGatewayIPv6 net.IP `json:"default-gateway-v6,omitempty"` + InterContainerCommunication bool `json:"icc,omitempty"` +} + +// InstallFlags adds command-line options to the top-level flag parser for +// the current process. +// Subsequent calls to `flag.Parse` will populate config with values parsed +// from the command-line. +func (config *Config) InstallFlags(cmd *flag.FlagSet, usageFn func(string) string) { + // First handle install flags which are consistent cross-platform + config.InstallCommonFlags(cmd, usageFn) + + // Then platform-specific install flags + cmd.BoolVar(&config.EnableSelinuxSupport, []string{"-selinux-enabled"}, false, usageFn("Enable selinux support")) + cmd.StringVar(&config.SocketGroup, []string{"G", "-group"}, "docker", usageFn("Group for the unix socket")) + config.Ulimits = make(map[string]*units.Ulimit) + cmd.Var(runconfigopts.NewUlimitOpt(&config.Ulimits), []string{"-default-ulimit"}, usageFn("Set default ulimits for containers")) + cmd.BoolVar(&config.bridgeConfig.EnableIPTables, []string{"#iptables", "-iptables"}, true, usageFn("Enable addition of iptables rules")) + cmd.BoolVar(&config.bridgeConfig.EnableIPForward, []string{"#ip-forward", "-ip-forward"}, true, usageFn("Enable net.ipv4.ip_forward")) + cmd.BoolVar(&config.bridgeConfig.EnableIPMasq, []string{"-ip-masq"}, true, usageFn("Enable IP masquerading")) + cmd.BoolVar(&config.bridgeConfig.EnableIPv6, []string{"-ipv6"}, false, usageFn("Enable IPv6 networking")) + cmd.StringVar(&config.bridgeConfig.IP, []string{"#bip", "-bip"}, "", usageFn("Specify network bridge IP")) + cmd.StringVar(&config.bridgeConfig.Iface, []string{"b", "-bridge"}, "", usageFn("Attach containers to a network bridge")) + cmd.StringVar(&config.bridgeConfig.FixedCIDR, []string{"-fixed-cidr"}, "", usageFn("IPv4 subnet for fixed IPs")) + cmd.StringVar(&config.bridgeConfig.FixedCIDRv6, []string{"-fixed-cidr-v6"}, "", usageFn("IPv6 subnet for fixed IPs")) + cmd.Var(opts.NewIPOpt(&config.bridgeConfig.DefaultGatewayIPv4, ""), []string{"-default-gateway"}, usageFn("Container default gateway IPv4 address")) + cmd.Var(opts.NewIPOpt(&config.bridgeConfig.DefaultGatewayIPv6, ""), []string{"-default-gateway-v6"}, usageFn("Container default gateway IPv6 address")) + cmd.BoolVar(&config.bridgeConfig.InterContainerCommunication, []string{"#icc", "-icc"}, true, usageFn("Enable inter-container communication")) + cmd.Var(opts.NewIPOpt(&config.bridgeConfig.DefaultIP, "0.0.0.0"), []string{"#ip", "-ip"}, usageFn("Default IP when binding container ports")) + cmd.BoolVar(&config.bridgeConfig.EnableUserlandProxy, []string{"-userland-proxy"}, true, usageFn("Use userland proxy for loopback traffic")) + cmd.BoolVar(&config.EnableCors, []string{"#api-enable-cors", "#-api-enable-cors"}, false, usageFn("Enable CORS headers in the remote API, this is deprecated by --api-cors-header")) + cmd.StringVar(&config.CorsHeaders, []string{"-api-cors-header"}, "", usageFn("Set CORS headers in the remote API")) + 
cmd.StringVar(&config.CgroupParent, []string{"-cgroup-parent"}, "", usageFn("Set parent cgroup for all containers")) + cmd.StringVar(&config.RemappedRoot, []string{"-userns-remap"}, "", usageFn("User/Group setting for user namespaces")) + cmd.StringVar(&config.ContainerdAddr, []string{"-containerd"}, "", usageFn("Path to containerd socket")) + + config.attachExperimentalFlags(cmd, usageFn) +} diff --git a/vendor/github.com/docker/docker/daemon/container_operations_unix.go b/vendor/github.com/docker/docker/daemon/container_operations_unix.go new file mode 100644 index 00000000..e4ed08c4 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/container_operations_unix.go @@ -0,0 +1,319 @@ +// +build linux freebsd + +package daemon + +import ( + "fmt" + "os" + "path/filepath" + "strconv" + "strings" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/links" + "github.com/docker/docker/pkg/fileutils" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/runconfig" + containertypes "github.com/docker/engine-api/types/container" + "github.com/opencontainers/runc/libcontainer/configs" + "github.com/opencontainers/runc/libcontainer/devices" + "github.com/opencontainers/runc/libcontainer/label" + "github.com/opencontainers/specs/specs-go" +) + +func u32Ptr(i int64) *uint32 { u := uint32(i); return &u } +func fmPtr(i int64) *os.FileMode { fm := os.FileMode(i); return &fm } + +func (daemon *Daemon) setupLinkedContainers(container *container.Container) ([]string, error) { + var env []string + children := daemon.children(container) + + bridgeSettings := container.NetworkSettings.Networks[runconfig.DefaultDaemonNetworkMode().NetworkName()] + if bridgeSettings == nil { + return nil, nil + } + + for linkAlias, child := range children { + if !child.IsRunning() { + return nil, fmt.Errorf("Cannot link to a non running container: %s AS %s", child.Name, linkAlias) + } + + childBridgeSettings := child.NetworkSettings.Networks[runconfig.DefaultDaemonNetworkMode().NetworkName()] + if childBridgeSettings == nil { + return nil, fmt.Errorf("container %s not attached to default bridge network", child.ID) + } + + link := links.NewLink( + bridgeSettings.IPAddress, + childBridgeSettings.IPAddress, + linkAlias, + child.Config.Env, + child.Config.ExposedPorts, + ) + + for _, envVar := range link.ToEnv() { + env = append(env, envVar) + } + } + + return env, nil +} + +// getSize returns the real size & virtual size of the container. +func (daemon *Daemon) getSize(container *container.Container) (int64, int64) { + var ( + sizeRw, sizeRootfs int64 + err error + ) + + if err := daemon.Mount(container); err != nil { + logrus.Errorf("Failed to compute size of container rootfs %s: %s", container.ID, err) + return sizeRw, sizeRootfs + } + defer daemon.Unmount(container) + + sizeRw, err = container.RWLayer.Size() + if err != nil { + logrus.Errorf("Driver %s couldn't return diff size of container %s: %s", + daemon.GraphDriverName(), container.ID, err) + // FIXME: GetSize should return an error. Not changing it now in case + // there is a side-effect. 
+ sizeRw = -1 + } + + if parent := container.RWLayer.Parent(); parent != nil { + sizeRootfs, err = parent.Size() + if err != nil { + sizeRootfs = -1 + } else if sizeRw != -1 { + sizeRootfs += sizeRw + } + } + return sizeRw, sizeRootfs +} + +func (daemon *Daemon) getIpcContainer(container *container.Container) (*container.Container, error) { + containerID := container.HostConfig.IpcMode.Container() + c, err := daemon.GetContainer(containerID) + if err != nil { + return nil, err + } + if !c.IsRunning() { + return nil, fmt.Errorf("cannot join IPC of a non running container: %s", containerID) + } + if c.IsRestarting() { + return nil, errContainerIsRestarting(container.ID) + } + return c, nil +} + +func (daemon *Daemon) setupIpcDirs(c *container.Container) error { + var err error + + c.ShmPath, err = c.ShmResourcePath() + if err != nil { + return err + } + + if c.HostConfig.IpcMode.IsContainer() { + ic, err := daemon.getIpcContainer(c) + if err != nil { + return err + } + c.ShmPath = ic.ShmPath + } else if c.HostConfig.IpcMode.IsHost() { + if _, err := os.Stat("/dev/shm"); err != nil { + return fmt.Errorf("/dev/shm is not mounted, but must be for --ipc=host") + } + c.ShmPath = "/dev/shm" + } else { + rootUID, rootGID := daemon.GetRemappedUIDGID() + if !c.HasMountFor("/dev/shm") { + shmPath, err := c.ShmResourcePath() + if err != nil { + return err + } + + if err := idtools.MkdirAllAs(shmPath, 0700, rootUID, rootGID); err != nil { + return err + } + + shmSize := container.DefaultSHMSize + if c.HostConfig.ShmSize != 0 { + shmSize = c.HostConfig.ShmSize + } + shmproperty := "mode=1777,size=" + strconv.FormatInt(shmSize, 10) + if err := syscall.Mount("shm", shmPath, "tmpfs", uintptr(syscall.MS_NOEXEC|syscall.MS_NOSUID|syscall.MS_NODEV), label.FormatMountLabel(shmproperty, c.GetMountLabel())); err != nil { + return fmt.Errorf("mounting shm tmpfs: %s", err) + } + if err := os.Chown(shmPath, rootUID, rootGID); err != nil { + return err + } + } + + } + + return nil +} + +func (daemon *Daemon) mountVolumes(container *container.Container) error { + mounts, err := daemon.setupMounts(container) + if err != nil { + return err + } + + for _, m := range mounts { + dest, err := container.GetResourcePath(m.Destination) + if err != nil { + return err + } + + var stat os.FileInfo + stat, err = os.Stat(m.Source) + if err != nil { + return err + } + if err = fileutils.CreateIfNotExists(dest, stat.IsDir()); err != nil { + return err + } + + opts := "rbind,ro" + if m.Writable { + opts = "rbind,rw" + } + + if err := mount.Mount(m.Source, dest, "bind", opts); err != nil { + return err + } + } + + return nil +} + +func killProcessDirectly(container *container.Container) error { + if _, err := container.WaitStop(10 * time.Second); err != nil { + // Ensure that we don't kill ourselves + if pid := container.GetPID(); pid != 0 { + logrus.Infof("Container %s failed to exit within 10 seconds of kill - trying direct SIGKILL", stringid.TruncateID(container.ID)) + if err := syscall.Kill(pid, 9); err != nil { + if err != syscall.ESRCH { + return err + } + e := errNoSuchProcess{pid, 9} + logrus.Debug(e) + return e + } + } + } + return nil +} + +func specDevice(d *configs.Device) specs.Device { + return specs.Device{ + Type: string(d.Type), + Path: d.Path, + Major: d.Major, + Minor: d.Minor, + FileMode: fmPtr(int64(d.FileMode)), + UID: u32Ptr(int64(d.Uid)), + GID: u32Ptr(int64(d.Gid)), + } +} + +func specDeviceCgroup(d *configs.Device) specs.DeviceCgroup { + t := string(d.Type) + return specs.DeviceCgroup{ + Allow: true, + Type: 
&t, + Major: &d.Major, + Minor: &d.Minor, + Access: &d.Permissions, + } +} + +func getDevicesFromPath(deviceMapping containertypes.DeviceMapping) (devs []specs.Device, devPermissions []specs.DeviceCgroup, err error) { + resolvedPathOnHost := deviceMapping.PathOnHost + + // check if it is a symbolic link + if src, e := os.Lstat(deviceMapping.PathOnHost); e == nil && src.Mode()&os.ModeSymlink == os.ModeSymlink { + if linkedPathOnHost, e := os.Readlink(deviceMapping.PathOnHost); e == nil { + resolvedPathOnHost = linkedPathOnHost + } + } + + device, err := devices.DeviceFromPath(resolvedPathOnHost, deviceMapping.CgroupPermissions) + // if there was no error, return the device + if err == nil { + device.Path = deviceMapping.PathInContainer + return append(devs, specDevice(device)), append(devPermissions, specDeviceCgroup(device)), nil + } + + // if the device is not a device node + // try to see if it's a directory holding many devices + if err == devices.ErrNotADevice { + + // check if it is a directory + if src, e := os.Stat(resolvedPathOnHost); e == nil && src.IsDir() { + + // mount the internal devices recursively + filepath.Walk(resolvedPathOnHost, func(dpath string, f os.FileInfo, e error) error { + childDevice, e := devices.DeviceFromPath(dpath, deviceMapping.CgroupPermissions) + if e != nil { + // ignore the device + return nil + } + + // add the device to userSpecified devices + childDevice.Path = strings.Replace(dpath, resolvedPathOnHost, deviceMapping.PathInContainer, 1) + devs = append(devs, specDevice(childDevice)) + devPermissions = append(devPermissions, specDeviceCgroup(childDevice)) + + return nil + }) + } + } + + if len(devs) > 0 { + return devs, devPermissions, nil + } + + return devs, devPermissions, fmt.Errorf("error gathering device information while adding custom device %q: %s", deviceMapping.PathOnHost, err) +} + +func mergeDevices(defaultDevices, userDevices []*configs.Device) []*configs.Device { + if len(userDevices) == 0 { + return defaultDevices + } + + paths := map[string]*configs.Device{} + for _, d := range userDevices { + paths[d.Path] = d + } + + var devs []*configs.Device + for _, d := range defaultDevices { + if _, defined := paths[d.Path]; !defined { + devs = append(devs, d) + } + } + return append(devs, userDevices...) 
+} + +func detachMounted(path string) error { + return syscall.Unmount(path, syscall.MNT_DETACH) +} + +func isLinkable(child *container.Container) bool { + // A container is linkable only if it belongs to the default network + _, ok := child.NetworkSettings.Networks[runconfig.DefaultDaemonNetworkMode().NetworkName()] + return ok +} + +func errRemovalContainer(containerID string) error { + return fmt.Errorf("Container %s is marked for removal and cannot be connected or disconnected to the network", containerID) +} diff --git a/vendor/github.com/docker/docker/daemon/create.go b/vendor/github.com/docker/docker/daemon/create.go new file mode 100644 index 00000000..34f0aa2d --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/create.go @@ -0,0 +1,185 @@ +package daemon + +import ( + "fmt" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/container" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/stringid" + volumestore "github.com/docker/docker/volume/store" + "github.com/docker/engine-api/types" + containertypes "github.com/docker/engine-api/types/container" + networktypes "github.com/docker/engine-api/types/network" + "github.com/opencontainers/runc/libcontainer/label" +) + +// ContainerCreate creates a container. +func (daemon *Daemon) ContainerCreate(params types.ContainerCreateConfig) (types.ContainerCreateResponse, error) { + if params.Config == nil { + return types.ContainerCreateResponse{}, fmt.Errorf("Config cannot be empty in order to create a container") + } + + warnings, err := daemon.verifyContainerSettings(params.HostConfig, params.Config, false) + if err != nil { + return types.ContainerCreateResponse{Warnings: warnings}, err + } + + err = daemon.verifyNetworkingConfig(params.NetworkingConfig) + if err != nil { + return types.ContainerCreateResponse{}, err + } + + if params.HostConfig == nil { + params.HostConfig = &containertypes.HostConfig{} + } + err = daemon.adaptContainerSettings(params.HostConfig, params.AdjustCPUShares) + if err != nil { + return types.ContainerCreateResponse{Warnings: warnings}, err + } + + container, err := daemon.create(params) + if err != nil { + return types.ContainerCreateResponse{Warnings: warnings}, daemon.imageNotExistToErrcode(err) + } + + return types.ContainerCreateResponse{ID: container.ID, Warnings: warnings}, nil +} + +// Create creates a new container from the given configuration with a given name. +func (daemon *Daemon) create(params types.ContainerCreateConfig) (retC *container.Container, retErr error) { + var ( + container *container.Container + img *image.Image + imgID image.ID + err error + ) + + if params.Config.Image != "" { + img, err = daemon.GetImage(params.Config.Image) + if err != nil { + return nil, err + } + imgID = img.ID() + } + + if err := daemon.mergeAndVerifyConfig(params.Config, img); err != nil { + return nil, err + } + + if container, err = daemon.newContainer(params.Name, params.Config, imgID); err != nil { + return nil, err + } + defer func() { + if retErr != nil { + if err := daemon.ContainerRm(container.ID, &types.ContainerRmConfig{ForceRemove: true}); err != nil { + logrus.Errorf("Clean up Error! 
Cannot destroy container %s: %v", container.ID, err) + } + } + }() + + if err := daemon.setSecurityOptions(container, params.HostConfig); err != nil { + return nil, err + } + + // Set RWLayer for container after mount labels have been set + if err := daemon.setRWLayer(container); err != nil { + return nil, err + } + + if err := daemon.Register(container); err != nil { + return nil, err + } + rootUID, rootGID, err := idtools.GetRootUIDGID(daemon.uidMaps, daemon.gidMaps) + if err != nil { + return nil, err + } + if err := idtools.MkdirAs(container.Root, 0700, rootUID, rootGID); err != nil { + return nil, err + } + + if err := daemon.setHostConfig(container, params.HostConfig); err != nil { + return nil, err + } + defer func() { + if retErr != nil { + if err := daemon.removeMountPoints(container, true); err != nil { + logrus.Error(err) + } + } + }() + + if err := daemon.createContainerPlatformSpecificSettings(container, params.Config, params.HostConfig); err != nil { + return nil, err + } + + var endpointsConfigs map[string]*networktypes.EndpointSettings + if params.NetworkingConfig != nil { + endpointsConfigs = params.NetworkingConfig.EndpointsConfig + } + + if err := daemon.updateContainerNetworkSettings(container, endpointsConfigs); err != nil { + return nil, err + } + + if err := container.ToDiskLocking(); err != nil { + logrus.Errorf("Error saving new container to disk: %v", err) + return nil, err + } + daemon.LogContainerEvent(container, "create") + return container, nil +} + +func (daemon *Daemon) generateSecurityOpt(ipcMode containertypes.IpcMode, pidMode containertypes.PidMode) ([]string, error) { + if ipcMode.IsHost() || pidMode.IsHost() { + return label.DisableSecOpt(), nil + } + if ipcContainer := ipcMode.Container(); ipcContainer != "" { + c, err := daemon.GetContainer(ipcContainer) + if err != nil { + return nil, err + } + + return label.DupSecOpt(c.ProcessLabel), nil + } + return nil, nil +} + +func (daemon *Daemon) setRWLayer(container *container.Container) error { + var layerID layer.ChainID + if container.ImageID != "" { + img, err := daemon.imageStore.Get(container.ImageID) + if err != nil { + return err + } + layerID = img.RootFS.ChainID() + } + rwLayer, err := daemon.layerStore.CreateRWLayer(container.ID, layerID, container.MountLabel, daemon.setupInitLayer) + if err != nil { + return err + } + container.RWLayer = rwLayer + + return nil +} + +// VolumeCreate creates a volume with the specified name, driver, and opts +// This is called directly from the remote API +func (daemon *Daemon) VolumeCreate(name, driverName string, opts, labels map[string]string) (*types.Volume, error) { + if name == "" { + name = stringid.GenerateNonCryptoID() + } + + v, err := daemon.volumes.Create(name, driverName, opts, labels) + if err != nil { + if volumestore.IsNameConflict(err) { + return nil, fmt.Errorf("A volume named %s already exists. 
Choose a different volume name.", name)
+		}
+		return nil, err
+	}
+
+	daemon.LogVolumeEvent(v.Name(), "create", map[string]string{"driver": v.DriverName()})
+	return volumeToAPIType(v), nil
+}
diff --git a/vendor/github.com/docker/docker/daemon/create_unix.go b/vendor/github.com/docker/docker/daemon/create_unix.go
new file mode 100644
index 00000000..37c4a911
--- /dev/null
+++ b/vendor/github.com/docker/docker/daemon/create_unix.go
@@ -0,0 +1,76 @@
+// +build !windows
+
+package daemon
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/container"
+	"github.com/docker/docker/pkg/stringid"
+	containertypes "github.com/docker/engine-api/types/container"
+	"github.com/opencontainers/runc/libcontainer/label"
+)
+
+// createContainerPlatformSpecificSettings performs platform-specific container create functionality
+func (daemon *Daemon) createContainerPlatformSpecificSettings(container *container.Container, config *containertypes.Config, hostConfig *containertypes.HostConfig) error {
+	if err := daemon.Mount(container); err != nil {
+		return err
+	}
+	defer daemon.Unmount(container)
+
+	rootUID, rootGID := daemon.GetRemappedUIDGID()
+	if err := container.SetupWorkingDirectory(rootUID, rootGID); err != nil {
+		return err
+	}
+
+	for spec := range config.Volumes {
+		name := stringid.GenerateNonCryptoID()
+		destination := filepath.Clean(spec)
+
+		// Skip volumes for which we already have something mounted on that
+		// destination because of a --volumes-from.
+		if container.IsDestinationMounted(destination) {
+			continue
+		}
+		path, err := container.GetResourcePath(destination)
+		if err != nil {
+			return err
+		}
+
+		stat, err := os.Stat(path)
+		if err == nil && !stat.IsDir() {
+			return fmt.Errorf("cannot mount volume over existing file, file exists %s", path)
+		}
+
+		v, err := daemon.volumes.CreateWithRef(name, hostConfig.VolumeDriver, container.ID, nil, nil)
+		if err != nil {
+			return err
+		}
+
+		if err := label.Relabel(v.Path(), container.MountLabel, true); err != nil {
+			return err
+		}
+
+		container.AddMountPointWithVolume(destination, v, true)
+	}
+	return daemon.populateVolumes(container)
+}
+
+// populateVolumes copies data from the container's rootfs into the volume for non-binds.
+// This is only called when the container is created.
+func (daemon *Daemon) populateVolumes(c *container.Container) error {
+	for _, mnt := range c.MountPoints {
+		if !mnt.CopyData || mnt.Volume == nil {
+			continue
+		}
+
+		logrus.Debugf("copying image data from %s:%s, to %s", c.ID, mnt.Destination, mnt.Name)
+		if err := c.CopyImagePathContent(mnt.Volume, mnt.Destination); err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/docker/docker/daemon/daemon.go b/vendor/github.com/docker/docker/daemon/daemon.go
new file mode 100644
index 00000000..f12034c6
--- /dev/null
+++ b/vendor/github.com/docker/docker/daemon/daemon.go
@@ -0,0 +1,1538 @@
+// Package daemon exposes the functions that occur on the host server
+// on which the Docker daemon is running.
+//
+// In implementing the various functions of the daemon, there is often
+// a method-specific struct for configuring the runtime behavior.
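+// ContainerAttach with its backend.ContainerAttachConfig is one example of
+// that pattern.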
+package daemon + +import ( + "fmt" + "io" + "io/ioutil" + "net" + "os" + "path" + "path/filepath" + "runtime" + "strings" + "sync" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + containerd "github.com/docker/containerd/api/grpc/types" + "github.com/docker/docker/api" + "github.com/docker/docker/builder" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/events" + "github.com/docker/docker/daemon/exec" + "github.com/docker/docker/errors" + "github.com/docker/engine-api/types" + containertypes "github.com/docker/engine-api/types/container" + eventtypes "github.com/docker/engine-api/types/events" + "github.com/docker/engine-api/types/filters" + networktypes "github.com/docker/engine-api/types/network" + registrytypes "github.com/docker/engine-api/types/registry" + "github.com/docker/engine-api/types/strslice" + // register graph drivers + _ "github.com/docker/docker/daemon/graphdriver/register" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/daemon/network" + "github.com/docker/docker/distribution" + dmetadata "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/distribution/xfer" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/image" + "github.com/docker/docker/image/tarexport" + "github.com/docker/docker/layer" + "github.com/docker/docker/libcontainerd" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/fileutils" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/namesgenerator" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/registrar" + "github.com/docker/docker/pkg/signal" + "github.com/docker/docker/pkg/streamformatter" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/sysinfo" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/pkg/truncindex" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "github.com/docker/docker/runconfig" + "github.com/docker/docker/utils" + volumedrivers "github.com/docker/docker/volume/drivers" + "github.com/docker/docker/volume/local" + "github.com/docker/docker/volume/store" + "github.com/docker/go-connections/nat" + "github.com/docker/libtrust" + "golang.org/x/net/context" +) + +const ( + // maxDownloadConcurrency is the maximum number of downloads that + // may take place at a time for each pull. + maxDownloadConcurrency = 3 + // maxUploadConcurrency is the maximum number of uploads that + // may take place at a time for each push. + maxUploadConcurrency = 5 +) + +var ( + validContainerNameChars = utils.RestrictedNameChars + validContainerNamePattern = utils.RestrictedNamePattern + + errSystemNotSupported = fmt.Errorf("The Docker daemon is not supported on this platform.") +) + +// ErrImageDoesNotExist is error returned when no image can be found for a reference. +type ErrImageDoesNotExist struct { + RefOrID string +} + +func (e ErrImageDoesNotExist) Error() string { + return fmt.Sprintf("no such id: %s", e.RefOrID) +} + +// Daemon holds information about the Docker daemon. 
+type Daemon struct {
+	ID                        string
+	repository                string
+	containers                container.Store
+	execCommands              *exec.Store
+	referenceStore            reference.Store
+	downloadManager           *xfer.LayerDownloadManager
+	uploadManager             *xfer.LayerUploadManager
+	distributionMetadataStore dmetadata.Store
+	trustKey                  libtrust.PrivateKey
+	idIndex                   *truncindex.TruncIndex
+	configStore               *Config
+	statsCollector            *statsCollector
+	defaultLogConfig          containertypes.LogConfig
+	RegistryService           *registry.Service
+	EventsService             *events.Events
+	volumes                   *store.VolumeStore
+	root                      string
+	seccompEnabled            bool
+	shutdown                  bool
+	uidMaps                   []idtools.IDMap
+	gidMaps                   []idtools.IDMap
+	layerStore                layer.Store
+	imageStore                image.Store
+	nameIndex                 *registrar.Registrar
+	linkIndex                 *linkIndex
+	containerd                libcontainerd.Client
+	defaultIsolation          containertypes.Isolation // Default isolation mode on Windows
+}
+
+// GetContainer looks for a container using the provided information, which could be
+// one of the following inputs from the caller:
+//  - A full container ID, which will exact match a container in the daemon's list
+//  - A container name, which will only exact match via the GetByName() function
+//  - A partial container ID prefix (e.g. short ID) of any length that is
+//    unique enough to only return a single container object
+// If none of these searches succeed, an error is returned.
+func (daemon *Daemon) GetContainer(prefixOrName string) (*container.Container, error) {
+	if len(prefixOrName) == 0 {
+		return nil, errors.NewBadRequestError(fmt.Errorf("No container name or ID supplied"))
+	}
+
+	if containerByID := daemon.containers.Get(prefixOrName); containerByID != nil {
+		// prefix is an exact match to a full container ID
+		return containerByID, nil
+	}
+
+	// GetByName will match only an exact name provided; we ignore errors
+	if containerByName, _ := daemon.GetByName(prefixOrName); containerByName != nil {
+		// prefix is an exact match to a full container Name
+		return containerByName, nil
+	}
+
+	containerID, indexError := daemon.idIndex.Get(prefixOrName)
+	if indexError != nil {
+		// When truncindex defines an error type, use that instead
+		if indexError == truncindex.ErrNotExist {
+			err := fmt.Errorf("No such container: %s", prefixOrName)
+			return nil, errors.NewRequestNotFoundError(err)
+		}
+		return nil, indexError
+	}
+	return daemon.containers.Get(containerID), nil
+}
+
+// Exists returns true if a container with the specified ID or name exists,
+// false otherwise.
+func (daemon *Daemon) Exists(id string) bool {
+	c, _ := daemon.GetContainer(id)
+	return c != nil
+}
+
+// IsPaused returns a bool indicating if the specified container is paused.
+func (daemon *Daemon) IsPaused(id string) bool {
+	c, _ := daemon.GetContainer(id)
+	return c.State.IsPaused()
+}
+
+func (daemon *Daemon) containerRoot(id string) string {
+	return filepath.Join(daemon.repository, id)
+}
+
+// load reads the contents of a container from disk.
+// This is typically done at startup.
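+//
+// An illustrative call (hypothetical; load is only used internally while the
+// daemon restores state, and id must be a full 64-character container ID):
+//
+//	c, err := daemon.load(id)
+//	if err != nil {
+//		logrus.Errorf("Failed to load container %v: %v", id, err)
+//	}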
+func (daemon *Daemon) load(id string) (*container.Container, error) {
+	container := daemon.newBaseContainer(id)
+
+	if err := container.FromDisk(); err != nil {
+		return nil, err
+	}
+
+	if container.ID != id {
+		return container, fmt.Errorf("Container %s is stored at %s", container.ID, id)
+	}
+
+	return container, nil
+}
+
+func (daemon *Daemon) registerName(container *container.Container) error {
+	if daemon.Exists(container.ID) {
+		return fmt.Errorf("Container is already loaded")
+	}
+	if err := validateID(container.ID); err != nil {
+		return err
+	}
+	if container.Name == "" {
+		name, err := daemon.generateNewName(container.ID)
+		if err != nil {
+			return err
+		}
+		container.Name = name
+
+		if err := container.ToDiskLocking(); err != nil {
+			logrus.Errorf("Error saving container name to disk: %v", err)
+		}
+	}
+	return daemon.nameIndex.Reserve(container.Name, container.ID)
+}
+
+// Register makes a container object usable by the daemon as <container.ID>.
+func (daemon *Daemon) Register(c *container.Container) error {
+	// Attach to stdout and stderr
+	if c.Config.OpenStdin {
+		c.NewInputPipes()
+	} else {
+		c.NewNopInputPipe()
+	}
+
+	daemon.containers.Add(c.ID, c)
+	daemon.idIndex.Add(c.ID)
+
+	return nil
+}
+
+func (daemon *Daemon) restore() error {
+	var (
+		debug         = utils.IsDebugEnabled()
+		currentDriver = daemon.GraphDriverName()
+		containers    = make(map[string]*container.Container)
+	)
+
+	if !debug {
+		logrus.Info("Loading containers: start.")
+	}
+	dir, err := ioutil.ReadDir(daemon.repository)
+	if err != nil {
+		return err
+	}
+
+	for _, v := range dir {
+		id := v.Name()
+		container, err := daemon.load(id)
+		if !debug && logrus.GetLevel() == logrus.InfoLevel {
+			fmt.Print(".")
+		}
+		if err != nil {
+			logrus.Errorf("Failed to load container %v: %v", id, err)
+			continue
+		}
+
+		// Ignore the container if it does not support the current driver being used by the graph
+		if (container.Driver == "" && currentDriver == "aufs") || container.Driver == currentDriver {
+			rwlayer, err := daemon.layerStore.GetRWLayer(container.ID)
+			if err != nil {
+				logrus.Errorf("Failed to load container mount %v: %v", id, err)
+				continue
+			}
+			container.RWLayer = rwlayer
+			logrus.Debugf("Loaded container %v", container.ID)
+
+			containers[container.ID] = container
+		} else {
+			logrus.Debugf("Cannot load container %s because it was created with another graph driver.", container.ID)
+		}
+	}
+
+	restartContainers := make(map[*container.Container]chan struct{})
+	for _, c := range containers {
+		if err := daemon.registerName(c); err != nil {
+			logrus.Errorf("Failed to register container %s: %s", c.ID, err)
+			continue
+		}
+		if err := daemon.Register(c); err != nil {
+			logrus.Errorf("Failed to register container %s: %s", c.ID, err)
+			continue
+		}
+	}
+	var wg sync.WaitGroup
+	var mapLock sync.Mutex
+	for _, c := range containers {
+		wg.Add(1)
+		go func(c *container.Container) {
+			defer wg.Done()
+			if c.IsRunning() || c.IsPaused() {
+				// Fix activityCount such that graph mounts can be unmounted later
+				if err := daemon.layerStore.ReinitRWLayer(c.RWLayer); err != nil {
+					logrus.Errorf("Failed to ReinitRWLayer for %s due to %s", c.ID, err)
+					return
+				}
+				if err := daemon.containerd.Restore(c.ID, libcontainerd.WithRestartManager(c.RestartManager(true))); err != nil {
+					logrus.Errorf("Failed to restore with containerd: %q", err)
+					return
+				}
+			}
+			// fixme: only if not running
+			// get list of containers we need to restart
+			if daemon.configStore.AutoRestart && !c.IsRunning() && !c.IsPaused() && c.ShouldRestartOnBoot() {
+				mapLock.Lock()
+				restartContainers[c] = make(chan struct{})
+				mapLock.Unlock()
+			}
+		}(c)
+	}
+	wg.Wait()
+
+	// Now that all the containers are registered, register the links
+	for _, c := range containers {
+		if err := daemon.registerLinks(c, c.HostConfig); err != nil {
+			logrus.Errorf("failed to register link for container %s: %v", c.ID, err)
+		}
+	}
+
+	group := sync.WaitGroup{}
+	for c, notifier := range restartContainers {
+		group.Add(1)
+
+		go func(c *container.Container, chNotify chan struct{}) {
+			defer group.Done()
+
+			logrus.Debugf("Starting container %s", c.ID)
+
+			// ignore errors here as this is a best effort to wait for children to be
+			// running before we try to start the container
+			children := daemon.children(c)
+			timeout := time.After(5 * time.Second)
+			for _, child := range children {
+				if notifier, exists := restartContainers[child]; exists {
+					select {
+					case <-notifier:
+					case <-timeout:
+					}
+				}
+			}
+
+			// Make sure networks are available before starting
+			if err := daemon.containerStart(c); err != nil {
+				logrus.Errorf("Failed to start container %s: %s", c.ID, err)
+			}
+			close(chNotify)
+		}(c, notifier)
+
+	}
+	group.Wait()
+
+	// Any containers that were started above have already had this done;
+	// however, we still need to prepare the mountpoints for the rest of the containers.
+	// This shouldn't cause any issue for containers that already had this run.
+	// This must be run after any containers with a restart policy so that containerized plugins
+	// can have a chance to be running before we try to initialize them.
+	for _, c := range containers {
+		// If the container has a restart policy, do not prepare the
+		// mountpoints, since that was already done on restart.
+		// This speeds up the daemon start when a restarted container
+		// has a volume and the volume driver is not available.
+		if _, ok := restartContainers[c]; ok {
+			continue
+		}
+		group.Add(1)
+		go func(c *container.Container) {
+			defer group.Done()
+			if err := daemon.prepareMountPoints(c); err != nil {
+				logrus.Error(err)
+			}
+		}(c)
+	}
+
+	group.Wait()
+
+	if !debug {
+		if logrus.GetLevel() == logrus.InfoLevel {
+			fmt.Println()
+		}
+		logrus.Info("Loading containers: done.")
+	}
+
+	return nil
+}
+
+func (daemon *Daemon) mergeAndVerifyConfig(config *containertypes.Config, img *image.Image) error {
+	if img != nil && img.Config != nil {
+		if err := merge(config, img.Config); err != nil {
+			return err
+		}
+	}
+	if len(config.Entrypoint) == 0 && len(config.Cmd) == 0 {
+		return fmt.Errorf("No command specified")
+	}
+	return nil
+}
+
+func (daemon *Daemon) generateIDAndName(name string) (string, string, error) {
+	var (
+		err error
+		id  = stringid.GenerateNonCryptoID()
+	)
+
+	if name == "" {
+		if name, err = daemon.generateNewName(id); err != nil {
+			return "", "", err
+		}
+		return id, name, nil
+	}
+
+	if name, err = daemon.reserveName(id, name); err != nil {
+		return "", "", err
+	}
+
+	return id, name, nil
+}
+
+func (daemon *Daemon) reserveName(id, name string) (string, error) {
+	if !validContainerNamePattern.MatchString(name) {
+		return "", fmt.Errorf("Invalid container name (%s), only %s are allowed", name, validContainerNameChars)
+	}
+	if name[0] != '/' {
+		name = "/" + name
+	}
+
+	if err := daemon.nameIndex.Reserve(name, id); err != nil {
+		if err == registrar.ErrNameReserved {
+			id, err := daemon.nameIndex.Get(name)
+			if err != nil {
+				logrus.Errorf("got unexpected error while looking up reserved name: %v", err)
+				return "", err
+			}
+			return "", fmt.Errorf("Conflict. The name %q is already in use by container %s. You have to remove (or rename) that container to be able to reuse that name.", name, id)
+		}
+		return "", fmt.Errorf("error reserving name: %s, error: %v", name, err)
+	}
+	return name, nil
+}
+
+func (daemon *Daemon) releaseName(name string) {
+	daemon.nameIndex.Release(name)
+}
+
+func (daemon *Daemon) generateNewName(id string) (string, error) {
+	var name string
+	for i := 0; i < 6; i++ {
+		name = namesgenerator.GetRandomName(i)
+		if name[0] != '/' {
+			name = "/" + name
+		}
+
+		if err := daemon.nameIndex.Reserve(name, id); err != nil {
+			if err == registrar.ErrNameReserved {
+				continue
+			}
+			return "", err
+		}
+		return name, nil
+	}
+
+	name = "/" + stringid.TruncateID(id)
+	if err := daemon.nameIndex.Reserve(name, id); err != nil {
+		return "", err
+	}
+	return name, nil
+}
+
+func (daemon *Daemon) generateHostname(id string, config *containertypes.Config) {
+	// Generate default hostname
+	if config.Hostname == "" {
+		config.Hostname = id[:12]
+	}
+}
+
+func (daemon *Daemon) getEntrypointAndArgs(configEntrypoint strslice.StrSlice, configCmd strslice.StrSlice) (string, []string) {
+	if len(configEntrypoint) != 0 {
+		return configEntrypoint[0], append(configEntrypoint[1:], configCmd...)
+	}
+	return configCmd[0], configCmd[1:]
+}
+
+func (daemon *Daemon) newContainer(name string, config *containertypes.Config, imgID image.ID) (*container.Container, error) {
+	var (
+		id             string
+		err            error
+		noExplicitName = name == ""
+	)
+	id, name, err = daemon.generateIDAndName(name)
+	if err != nil {
+		return nil, err
+	}
+
+	daemon.generateHostname(id, config)
+	entrypoint, args := daemon.getEntrypointAndArgs(config.Entrypoint, config.Cmd)
+
+	base := daemon.newBaseContainer(id)
+	base.Created = time.Now().UTC()
+	base.Path = entrypoint
+	base.Args = args //FIXME: de-duplicate from config
+	base.Config = config
+	base.HostConfig = &containertypes.HostConfig{}
+	base.ImageID = imgID
+	base.NetworkSettings = &network.Settings{IsAnonymousEndpoint: noExplicitName}
+	base.Name = name
+	base.Driver = daemon.GraphDriverName()
+
+	return base, err
+}
+
+// GetByName returns a container given a name.
+func (daemon *Daemon) GetByName(name string) (*container.Container, error) {
+	if len(name) == 0 {
+		return nil, fmt.Errorf("No container name supplied")
+	}
+	fullName := name
+	if name[0] != '/' {
+		fullName = "/" + name
+	}
+	id, err := daemon.nameIndex.Get(fullName)
+	if err != nil {
+		return nil, fmt.Errorf("Could not find entity for %s", name)
+	}
+	e := daemon.containers.Get(id)
+	if e == nil {
+		return nil, fmt.Errorf("Could not find container for entity id %s", id)
+	}
+	return e, nil
+}
+
+// SubscribeToEvents returns the current record of events and a channel to
+// stream new events from.
+func (daemon *Daemon) SubscribeToEvents(since, sinceNano int64, filter filters.Args) ([]eventtypes.Message, chan interface{}) {
+	ef := events.NewFilter(filter)
+	return daemon.EventsService.SubscribeTopic(since, sinceNano, ef)
+}
+
+// UnsubscribeFromEvents stops the event subscription for a client by closing the
+// channel where the daemon sends events to.
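+//
+// A hypothetical subscriber, pairing the two calls (filters.NewArgs comes
+// from engine-api and yields an empty filter set):
+//
+//	past, l := daemon.SubscribeToEvents(0, 0, filters.NewArgs())
+//	defer daemon.UnsubscribeFromEvents(l)
+//	for _, msg := range past {
+//		_ = msg // replay buffered events before streaming from l
+//	}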
+func (daemon *Daemon) UnsubscribeFromEvents(listener chan interface{}) { + daemon.EventsService.Evict(listener) +} + +// GetLabels for a container or image id +func (daemon *Daemon) GetLabels(id string) map[string]string { + // TODO: TestCase + container := daemon.containers.Get(id) + if container != nil { + return container.Config.Labels + } + + img, err := daemon.GetImage(id) + if err == nil { + return img.ContainerConfig.Labels + } + return nil +} + +func (daemon *Daemon) children(c *container.Container) map[string]*container.Container { + return daemon.linkIndex.children(c) +} + +// parents returns the names of the parent containers of the container +// with the given name. +func (daemon *Daemon) parents(c *container.Container) map[string]*container.Container { + return daemon.linkIndex.parents(c) +} + +func (daemon *Daemon) registerLink(parent, child *container.Container, alias string) error { + fullName := path.Join(parent.Name, alias) + if err := daemon.nameIndex.Reserve(fullName, child.ID); err != nil { + if err == registrar.ErrNameReserved { + logrus.Warnf("error registering link for %s, to %s, as alias %s, ignoring: %v", parent.ID, child.ID, alias, err) + return nil + } + return err + } + daemon.linkIndex.link(parent, child, fullName) + return nil +} + +// NewDaemon sets up everything for the daemon to be able to service +// requests from the webserver. +func NewDaemon(config *Config, registryService *registry.Service, containerdRemote libcontainerd.Remote) (daemon *Daemon, err error) { + setDefaultMtu(config) + + // Ensure we have compatible and valid configuration options + if err := verifyDaemonSettings(config); err != nil { + return nil, err + } + + // Do we have a disabled network? + config.DisableBridge = isBridgeNetworkDisabled(config) + + // Verify the platform is supported as a daemon + if !platformSupported { + return nil, errSystemNotSupported + } + + // Validate platform-specific requirements + if err := checkSystem(); err != nil { + return nil, err + } + + // set up SIGUSR1 handler on Unix-like systems, or a Win32 global event + // on Windows to dump Go routine stacks + setupDumpStackTrap() + + uidMaps, gidMaps, err := setupRemappedRoot(config) + if err != nil { + return nil, err + } + rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) + if err != nil { + return nil, err + } + + // get the canonical path to the Docker root directory + var realRoot string + if _, err := os.Stat(config.Root); err != nil && os.IsNotExist(err) { + realRoot = config.Root + } else { + realRoot, err = fileutils.ReadSymlinkedDirectory(config.Root) + if err != nil { + return nil, fmt.Errorf("Unable to get the full path to root (%s): %s", config.Root, err) + } + } + + if err = setupDaemonRoot(config, realRoot, rootUID, rootGID); err != nil { + return nil, err + } + + // set up the tmpDir to use a canonical path + tmp, err := tempDir(config.Root, rootUID, rootGID) + if err != nil { + return nil, fmt.Errorf("Unable to get the TempDir under %s: %s", config.Root, err) + } + realTmp, err := fileutils.ReadSymlinkedDirectory(tmp) + if err != nil { + return nil, fmt.Errorf("Unable to get the full path to the TempDir (%s): %s", tmp, err) + } + os.Setenv("TMPDIR", realTmp) + + d := &Daemon{configStore: config} + // Ensure the daemon is properly shutdown if there is a failure during + // initialization + defer func() { + if err != nil { + if err := d.Shutdown(); err != nil { + logrus.Error(err) + } + } + }() + + // Set the default isolation mode (only applicable on Windows) + if err := 
d.setDefaultIsolation(); err != nil {
+		return nil, fmt.Errorf("error setting default isolation mode: %v", err)
+	}
+
+	// Verify logging driver type
+	if config.LogConfig.Type != "none" {
+		if _, err := logger.GetLogDriver(config.LogConfig.Type); err != nil {
+			return nil, fmt.Errorf("error finding the logging driver: %v", err)
+		}
+	}
+	logrus.Debugf("Using default logging driver %s", config.LogConfig.Type)
+
+	if err := configureMaxThreads(config); err != nil {
+		logrus.Warnf("Failed to configure golang's threads limit: %v", err)
+	}
+
+	installDefaultAppArmorProfile()
+	daemonRepo := filepath.Join(config.Root, "containers")
+	if err := idtools.MkdirAllAs(daemonRepo, 0700, rootUID, rootGID); err != nil && !os.IsExist(err) {
+		return nil, err
+	}
+
+	driverName := os.Getenv("DOCKER_DRIVER")
+	if driverName == "" {
+		driverName = config.GraphDriver
+	}
+	d.layerStore, err = layer.NewStoreFromOptions(layer.StoreOptions{
+		StorePath:                 config.Root,
+		MetadataStorePathTemplate: filepath.Join(config.Root, "image", "%s", "layerdb"),
+		GraphDriver:               driverName,
+		GraphDriverOptions:        config.GraphOptions,
+		UIDMaps:                   uidMaps,
+		GIDMaps:                   gidMaps,
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	graphDriver := d.layerStore.DriverName()
+	imageRoot := filepath.Join(config.Root, "image", graphDriver)
+
+	// Configure and validate the kernel's security support
+	if err := configureKernelSecuritySupport(config, graphDriver); err != nil {
+		return nil, err
+	}
+
+	d.downloadManager = xfer.NewLayerDownloadManager(d.layerStore, maxDownloadConcurrency)
+	d.uploadManager = xfer.NewLayerUploadManager(maxUploadConcurrency)
+
+	ifs, err := image.NewFSStoreBackend(filepath.Join(imageRoot, "imagedb"))
+	if err != nil {
+		return nil, err
+	}
+
+	d.imageStore, err = image.NewImageStore(ifs, d.layerStore)
+	if err != nil {
+		return nil, err
+	}
+
+	// Configure the volumes driver
+	volStore, err := configureVolumes(config, rootUID, rootGID)
+	if err != nil {
+		return nil, err
+	}
+
+	trustKey, err := api.LoadOrCreateTrustKey(config.TrustKeyPath)
+	if err != nil {
+		return nil, err
+	}
+
+	trustDir := filepath.Join(config.Root, "trust")
+
+	if err := system.MkdirAll(trustDir, 0700); err != nil {
+		return nil, err
+	}
+
+	distributionMetadataStore, err := dmetadata.NewFSMetadataStore(filepath.Join(imageRoot, "distribution"))
+	if err != nil {
+		return nil, err
+	}
+
+	eventsService := events.New()
+
+	referenceStore, err := reference.NewReferenceStore(filepath.Join(imageRoot, "repositories.json"))
+	if err != nil {
+		return nil, fmt.Errorf("Couldn't create Tag store repositories: %s", err)
+	}
+
+	if err := restoreCustomImage(d.imageStore, d.layerStore, referenceStore); err != nil {
+		return nil, fmt.Errorf("Couldn't restore custom images: %s", err)
+	}
+
+	sysInfo := sysinfo.New(false)
+	// Check if the Devices cgroup is mounted; it is a hard requirement for
+	// container security on Linux/FreeBSD.
+	if runtime.GOOS != "windows" && !sysInfo.CgroupDevicesEnabled {
+		return nil, fmt.Errorf("Devices cgroup isn't mounted")
+	}
+
+	d.ID = trustKey.PublicKey().KeyID()
+	d.repository = daemonRepo
+	d.containers = container.NewMemoryStore()
+	d.execCommands = exec.NewStore()
+	d.referenceStore = referenceStore
+	d.distributionMetadataStore = distributionMetadataStore
+	d.trustKey = trustKey
+	d.idIndex = truncindex.NewTruncIndex([]string{})
+	d.statsCollector = d.newStatsCollector(1 * time.Second)
+	d.defaultLogConfig = containertypes.LogConfig{
+		Type:   config.LogConfig.Type,
+		Config: config.LogConfig.Config,
+	}
+	d.RegistryService = registryService
+	d.EventsService = eventsService
+	d.volumes = volStore
+	d.root = config.Root
+	d.uidMaps = uidMaps
+	d.gidMaps = gidMaps
+	d.seccompEnabled = sysInfo.Seccomp
+
+	d.nameIndex = registrar.NewRegistrar()
+	d.linkIndex = newLinkIndex()
+
+	go d.execCommandGC()
+
+	d.containerd, err = containerdRemote.Client(d)
+	if err != nil {
+		return nil, err
+	}
+
+	if err := d.restore(); err != nil {
+		return nil, err
+	}
+
+	return d, nil
+}
+
+func (daemon *Daemon) shutdownContainer(c *container.Container) error {
+	// TODO(windows): Handle docker restart with paused containers
+	if c.IsPaused() {
+		// To terminate a process in the freezer cgroup, we should send
+		// SIGTERM to this process then unfreeze it, and the process will
+		// be forced to terminate immediately.
+		logrus.Debugf("Found container %s paused, sending SIGTERM before unpausing it", c.ID)
+		sig, ok := signal.SignalMap["TERM"]
+		if !ok {
+			return fmt.Errorf("System does not support SIGTERM")
+		}
+		if err := daemon.kill(c, int(sig)); err != nil {
+			return fmt.Errorf("sending SIGTERM to container %s with error: %v", c.ID, err)
+		}
+		if err := daemon.containerUnpause(c); err != nil {
+			return fmt.Errorf("Failed to unpause container %s with error: %v", c.ID, err)
+		}
+		if _, err := c.WaitStop(10 * time.Second); err != nil {
+			logrus.Debugf("container %s failed to exit in 10 seconds of SIGTERM, sending SIGKILL to force", c.ID)
+			sig, ok := signal.SignalMap["KILL"]
+			if !ok {
+				return fmt.Errorf("System does not support SIGKILL")
+			}
+			if err := daemon.kill(c, int(sig)); err != nil {
+				logrus.Errorf("Failed to SIGKILL container %s", c.ID)
+			}
+			c.WaitStop(-1 * time.Second)
+			return err
+		}
+	}
+	// If the container failed to exit within 10 seconds of SIGTERM, use the force
+	if err := daemon.containerStop(c, 10); err != nil {
+		return fmt.Errorf("Failed to stop container %s with error: %v", c.ID, err)
+	}
+
+	c.WaitStop(-1 * time.Second)
+	return nil
+}
+
+// Shutdown stops the daemon.
+func (daemon *Daemon) Shutdown() error {
+	daemon.shutdown = true
+	if daemon.containers != nil {
+		logrus.Debug("starting clean shutdown of all containers...")
+		daemon.containers.ApplyAll(func(c *container.Container) {
+			if !c.IsRunning() {
+				return
+			}
+			logrus.Debugf("stopping %s", c.ID)
+			if err := daemon.shutdownContainer(c); err != nil {
+				logrus.Errorf("Stop container error: %v", err)
+				return
+			}
+			if mountid, err := daemon.layerStore.GetMountID(c.ID); err == nil {
+				daemon.cleanupMountsByID(mountid)
+			}
+			logrus.Debugf("container stopped %s", c.ID)
+		})
+	}
+
+	if daemon.layerStore != nil {
+		if err := daemon.layerStore.Cleanup(); err != nil {
+			logrus.Errorf("Error during layer Store.Cleanup(): %v", err)
+		}
+	}
+
+	if err := daemon.cleanupMounts(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// Mount sets container.BaseFS
+// (is it not set coming in? why is it unset?)
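+//
+// Mount and Unmount are typically paired; a minimal sketch, assuming c is a
+// *container.Container obtained from GetContainer:
+//
+//	if err := daemon.Mount(c); err != nil {
+//		return err
+//	}
+//	defer daemon.Unmount(c)
+//	// c.BaseFS now points at the mounted root filesystem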
+func (daemon *Daemon) Mount(container *container.Container) error { + dir, err := container.RWLayer.Mount(container.GetMountLabel()) + if err != nil { + return err + } + logrus.Debugf("container mounted via layerStore: %v", dir) + + if container.BaseFS != dir { + // The mount path reported by the graph driver should always be trusted on Windows, since the + // volume path for a given mounted layer may change over time. This should only be an error + // on non-Windows operating systems. + if container.BaseFS != "" && runtime.GOOS != "windows" { + daemon.Unmount(container) + return fmt.Errorf("Error: driver %s is returning inconsistent paths for container %s ('%s' then '%s')", + daemon.GraphDriverName(), container.ID, container.BaseFS, dir) + } + } + container.BaseFS = dir // TODO: combine these fields + return nil +} + +// Unmount unsets the container base filesystem +func (daemon *Daemon) Unmount(container *container.Container) error { + if err := container.RWLayer.Unmount(); err != nil { + logrus.Errorf("Error unmounting container %s: %s", container.ID, err) + return err + } + return nil +} + +func (daemon *Daemon) kill(c *container.Container, sig int) error { + return daemon.containerd.Signal(c.ID, sig) +} + +func (daemon *Daemon) subscribeToContainerStats(c *container.Container) chan interface{} { + return daemon.statsCollector.collect(c) +} + +func (daemon *Daemon) unsubscribeToContainerStats(c *container.Container, ch chan interface{}) { + daemon.statsCollector.unsubscribe(c, ch) +} + +func (daemon *Daemon) changes(container *container.Container) ([]archive.Change, error) { + return container.RWLayer.Changes() +} + +// TagImage creates the tag specified by newTag, pointing to the image named +// imageName (alternatively, imageName can also be an image ID). +func (daemon *Daemon) TagImage(newTag reference.Named, imageName string) error { + imageID, err := daemon.GetImageID(imageName) + if err != nil { + return err + } + if err := daemon.referenceStore.AddTag(newTag, imageID, true); err != nil { + return err + } + + daemon.LogImageEvent(imageID.String(), newTag.String(), "tag") + return nil +} + +func writeDistributionProgress(cancelFunc func(), outStream io.Writer, progressChan <-chan progress.Progress) { + progressOutput := streamformatter.NewJSONStreamFormatter().NewProgressOutput(outStream, false) + operationCancelled := false + + for prog := range progressChan { + if err := progressOutput.WriteProgress(prog); err != nil && !operationCancelled { + // don't log broken pipe errors as this is the normal case when a client aborts + if isBrokenPipe(err) { + logrus.Info("Pull session cancelled") + } else { + logrus.Errorf("error writing progress to client: %v", err) + } + cancelFunc() + operationCancelled = true + // Don't return, because we need to continue draining + // progressChan until it's closed to avoid a deadlock. + } + } +} + +func isBrokenPipe(e error) bool { + if netErr, ok := e.(*net.OpError); ok { + e = netErr.Err + if sysErr, ok := netErr.Err.(*os.SyscallError); ok { + e = sysErr.Err + } + } + return e == syscall.EPIPE +} + +// PullImage initiates a pull operation. image is the repository name to pull, and +// tag may be either empty, or indicate a specific tag to pull. +func (daemon *Daemon) PullImage(ctx context.Context, ref reference.Named, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error { + // Include a buffer so that slow client connections don't affect + // transfer performance. 
+ progressChan := make(chan progress.Progress, 100) + + writesDone := make(chan struct{}) + + ctx, cancelFunc := context.WithCancel(ctx) + + go func() { + writeDistributionProgress(cancelFunc, outStream, progressChan) + close(writesDone) + }() + + imagePullConfig := &distribution.ImagePullConfig{ + MetaHeaders: metaHeaders, + AuthConfig: authConfig, + ProgressOutput: progress.ChanOutput(progressChan), + RegistryService: daemon.RegistryService, + ImageEventLogger: daemon.LogImageEvent, + MetadataStore: daemon.distributionMetadataStore, + ImageStore: daemon.imageStore, + ReferenceStore: daemon.referenceStore, + DownloadManager: daemon.downloadManager, + } + + err := distribution.Pull(ctx, ref, imagePullConfig) + close(progressChan) + <-writesDone + return err +} + +// PullOnBuild tells Docker to pull image referenced by `name`. +func (daemon *Daemon) PullOnBuild(ctx context.Context, name string, authConfigs map[string]types.AuthConfig, output io.Writer) (builder.Image, error) { + ref, err := reference.ParseNamed(name) + if err != nil { + return nil, err + } + ref = reference.WithDefaultTag(ref) + + pullRegistryAuth := &types.AuthConfig{} + if len(authConfigs) > 0 { + // The request came with a full auth config file, we prefer to use that + repoInfo, err := daemon.RegistryService.ResolveRepository(ref) + if err != nil { + return nil, err + } + + resolvedConfig := registry.ResolveAuthConfig( + authConfigs, + repoInfo.Index, + ) + pullRegistryAuth = &resolvedConfig + } + + if err := daemon.PullImage(ctx, ref, nil, pullRegistryAuth, output); err != nil { + return nil, err + } + return daemon.GetImage(name) +} + +// ExportImage exports a list of images to the given output stream. The +// exported images are archived into a tar when written to the output +// stream. All images with the given tag and all versions containing +// the same tag are exported. names is the set of tags to export, and +// outStream is the writer which the images are written to. +func (daemon *Daemon) ExportImage(names []string, outStream io.Writer) error { + imageExporter := tarexport.NewTarExporter(daemon.imageStore, daemon.layerStore, daemon.referenceStore) + return imageExporter.Save(names, outStream) +} + +// PushImage initiates a push operation on the repository named localName. +func (daemon *Daemon) PushImage(ctx context.Context, ref reference.Named, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error { + // Include a buffer so that slow client connections don't affect + // transfer performance. + progressChan := make(chan progress.Progress, 100) + + writesDone := make(chan struct{}) + + ctx, cancelFunc := context.WithCancel(ctx) + + go func() { + writeDistributionProgress(cancelFunc, outStream, progressChan) + close(writesDone) + }() + + imagePushConfig := &distribution.ImagePushConfig{ + MetaHeaders: metaHeaders, + AuthConfig: authConfig, + ProgressOutput: progress.ChanOutput(progressChan), + RegistryService: daemon.RegistryService, + ImageEventLogger: daemon.LogImageEvent, + MetadataStore: daemon.distributionMetadataStore, + LayerStore: daemon.layerStore, + ImageStore: daemon.imageStore, + ReferenceStore: daemon.referenceStore, + TrustKey: daemon.trustKey, + UploadManager: daemon.uploadManager, + } + + err := distribution.Push(ctx, ref, imagePushConfig) + close(progressChan) + <-writesDone + return err +} + +// LookupImage looks up an image by name and returns it as an ImageInspect +// structure. 
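+//
+// A hypothetical call, assuming an image tagged "ubuntu:14.04" exists
+// locally:
+//
+//	inspect, err := daemon.LookupImage("ubuntu:14.04")
+//	if err == nil {
+//		fmt.Println(inspect.ID, inspect.RepoTags)
+//	}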
+func (daemon *Daemon) LookupImage(name string) (*types.ImageInspect, error) { + img, err := daemon.GetImage(name) + if err != nil { + return nil, fmt.Errorf("No such image: %s", name) + } + + refs := daemon.referenceStore.References(img.ID()) + repoTags := []string{} + repoDigests := []string{} + for _, ref := range refs { + switch ref.(type) { + case reference.NamedTagged: + repoTags = append(repoTags, ref.String()) + case reference.Canonical: + repoDigests = append(repoDigests, ref.String()) + } + } + + var size int64 + var layerMetadata map[string]string + layerID := img.RootFS.ChainID() + if layerID != "" { + l, err := daemon.layerStore.Get(layerID) + if err != nil { + return nil, err + } + defer layer.ReleaseAndLog(daemon.layerStore, l) + size, err = l.Size() + if err != nil { + return nil, err + } + + layerMetadata, err = l.Metadata() + if err != nil { + return nil, err + } + } + + comment := img.Comment + if len(comment) == 0 && len(img.History) > 0 { + comment = img.History[len(img.History)-1].Comment + } + + imageInspect := &types.ImageInspect{ + ID: img.ID().String(), + RepoTags: repoTags, + RepoDigests: repoDigests, + Parent: img.Parent.String(), + Comment: comment, + Created: img.Created.Format(time.RFC3339Nano), + Container: img.Container, + ContainerConfig: &img.ContainerConfig, + DockerVersion: img.DockerVersion, + Author: img.Author, + Config: img.Config, + Architecture: img.Architecture, + Os: img.OS, + Size: size, + VirtualSize: size, // TODO: field unused, deprecate + RootFS: rootFSToAPIType(img.RootFS), + } + + imageInspect.GraphDriver.Name = daemon.GraphDriverName() + + imageInspect.GraphDriver.Data = layerMetadata + + return imageInspect, nil +} + +// LoadImage uploads a set of images into the repository. This is the +// complement of ImageExport. The input stream is an uncompressed tar +// ball containing images and metadata. +func (daemon *Daemon) LoadImage(inTar io.ReadCloser, outStream io.Writer, quiet bool) error { + imageExporter := tarexport.NewTarExporter(daemon.imageStore, daemon.layerStore, daemon.referenceStore) + return imageExporter.Load(inTar, outStream, quiet) +} + +// ImageHistory returns a slice of ImageHistory structures for the specified image +// name by walking the image lineage. +func (daemon *Daemon) ImageHistory(name string) ([]*types.ImageHistory, error) { + img, err := daemon.GetImage(name) + if err != nil { + return nil, err + } + + history := []*types.ImageHistory{} + + layerCounter := 0 + rootFS := *img.RootFS + rootFS.DiffIDs = nil + + for _, h := range img.History { + var layerSize int64 + + if !h.EmptyLayer { + if len(img.RootFS.DiffIDs) <= layerCounter { + return nil, fmt.Errorf("too many non-empty layers in History section") + } + + rootFS.Append(img.RootFS.DiffIDs[layerCounter]) + l, err := daemon.layerStore.Get(rootFS.ChainID()) + if err != nil { + return nil, err + } + layerSize, err = l.DiffSize() + layer.ReleaseAndLog(daemon.layerStore, l) + if err != nil { + return nil, err + } + + layerCounter++ + } + + history = append([]*types.ImageHistory{{ + ID: "", + Created: h.Created.Unix(), + CreatedBy: h.CreatedBy, + Comment: h.Comment, + Size: layerSize, + }}, history...) 
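+		// entries are prepended, so the most recent layer ends up first
+		// in the returned history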
+ } + + // Fill in image IDs and tags + histImg := img + id := img.ID() + for _, h := range history { + h.ID = id.String() + + var tags []string + for _, r := range daemon.referenceStore.References(id) { + if _, ok := r.(reference.NamedTagged); ok { + tags = append(tags, r.String()) + } + } + + h.Tags = tags + + id = histImg.Parent + if id == "" { + break + } + histImg, err = daemon.GetImage(id.String()) + if err != nil { + break + } + } + + return history, nil +} + +// GetImageID returns an image ID corresponding to the image referred to by +// refOrID. +func (daemon *Daemon) GetImageID(refOrID string) (image.ID, error) { + id, ref, err := reference.ParseIDOrReference(refOrID) + if err != nil { + return "", err + } + if id != "" { + if _, err := daemon.imageStore.Get(image.ID(id)); err != nil { + return "", ErrImageDoesNotExist{refOrID} + } + return image.ID(id), nil + } + + if id, err := daemon.referenceStore.Get(ref); err == nil { + return id, nil + } + if tagged, ok := ref.(reference.NamedTagged); ok { + if id, err := daemon.imageStore.Search(tagged.Tag()); err == nil { + for _, namedRef := range daemon.referenceStore.References(id) { + if namedRef.Name() == ref.Name() { + return id, nil + } + } + } + } + + // Search based on ID + if id, err := daemon.imageStore.Search(refOrID); err == nil { + return id, nil + } + + return "", ErrImageDoesNotExist{refOrID} +} + +// GetImage returns an image corresponding to the image referred to by refOrID. +func (daemon *Daemon) GetImage(refOrID string) (*image.Image, error) { + imgID, err := daemon.GetImageID(refOrID) + if err != nil { + return nil, err + } + return daemon.imageStore.Get(imgID) +} + +// GetImageOnBuild looks up a Docker image referenced by `name`. +func (daemon *Daemon) GetImageOnBuild(name string) (builder.Image, error) { + img, err := daemon.GetImage(name) + if err != nil { + return nil, err + } + return img, nil +} + +// GraphDriverName returns the name of the graph driver used by the layer.Store +func (daemon *Daemon) GraphDriverName() string { + return daemon.layerStore.DriverName() +} + +// GetUIDGIDMaps returns the current daemon's user namespace settings +// for the full uid and gid maps which will be applied to containers +// started in this instance. +func (daemon *Daemon) GetUIDGIDMaps() ([]idtools.IDMap, []idtools.IDMap) { + return daemon.uidMaps, daemon.gidMaps +} + +// GetRemappedUIDGID returns the current daemon's uid and gid values +// if user namespaces are in use for this daemon instance. If not +// this function will return "real" root values of 0, 0. +func (daemon *Daemon) GetRemappedUIDGID() (int, int) { + uid, gid, _ := idtools.GetRootUIDGID(daemon.uidMaps, daemon.gidMaps) + return uid, gid +} + +// GetCachedImage returns the most recent created image that is a child +// of the image with imgID, that had the same config when it was +// created. nil is returned if a child cannot be found. An error is +// returned if the parent image cannot be found. 
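+//
+// A hypothetical cache probe during a build, where parentID is an existing
+// image.ID and cfg is the config of the step being built:
+//
+//	match, err := daemon.GetCachedImage(parentID, cfg)
+//	if err == nil && match != nil {
+//		// cache hit: reuse match.ID() instead of executing the step
+//	}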
+func (daemon *Daemon) GetCachedImage(imgID image.ID, config *containertypes.Config) (*image.Image, error) { + // Loop on the children of the given image and check the config + getMatch := func(siblings []image.ID) (*image.Image, error) { + var match *image.Image + for _, id := range siblings { + img, err := daemon.imageStore.Get(id) + if err != nil { + return nil, fmt.Errorf("unable to find image %q", id) + } + + if runconfig.Compare(&img.ContainerConfig, config) { + // check for the most up to date match + if match == nil || match.Created.Before(img.Created) { + match = img + } + } + } + return match, nil + } + + // In this case, this is `FROM scratch`, which isn't an actual image. + if imgID == "" { + images := daemon.imageStore.Map() + var siblings []image.ID + for id, img := range images { + if img.Parent == imgID { + siblings = append(siblings, id) + } + } + return getMatch(siblings) + } + + // find match from child images + siblings := daemon.imageStore.Children(imgID) + return getMatch(siblings) +} + +// GetCachedImageOnBuild returns a reference to a cached image whose parent equals `parent` +// and runconfig equals `cfg`. A cache miss is expected to return an empty ID and a nil error. +func (daemon *Daemon) GetCachedImageOnBuild(imgID string, cfg *containertypes.Config) (string, error) { + cache, err := daemon.GetCachedImage(image.ID(imgID), cfg) + if cache == nil || err != nil { + return "", err + } + return cache.ID().String(), nil +} + +// tempDir returns the default directory to use for temporary files. +func tempDir(rootDir string, rootUID, rootGID int) (string, error) { + var tmpDir string + if tmpDir = os.Getenv("DOCKER_TMPDIR"); tmpDir == "" { + tmpDir = filepath.Join(rootDir, "tmp") + } + return tmpDir, idtools.MkdirAllAs(tmpDir, 0700, rootUID, rootGID) +} + +func (daemon *Daemon) setSecurityOptions(container *container.Container, hostConfig *containertypes.HostConfig) error { + container.Lock() + defer container.Unlock() + return parseSecurityOpt(container, hostConfig) +} + +func (daemon *Daemon) setHostConfig(container *container.Container, hostConfig *containertypes.HostConfig) error { + // Do not lock while creating volumes since this could be calling out to external plugins + // Don't want to block other actions, like `docker ps` because we're waiting on an external plugin + if err := daemon.registerMountPoints(container, hostConfig); err != nil { + return err + } + + container.Lock() + defer container.Unlock() + + // Register any links from the host config before starting the container + if err := daemon.registerLinks(container, hostConfig); err != nil { + return err + } + + // make sure links is not nil + // this ensures that on the next daemon restart we don't try to migrate from legacy sqlite links + if hostConfig.Links == nil { + hostConfig.Links = []string{} + } + + container.HostConfig = hostConfig + return container.ToDisk() +} + +func (daemon *Daemon) setupInitLayer(initPath string) error { + rootUID, rootGID := daemon.GetRemappedUIDGID() + return setupInitLayer(initPath, rootUID, rootGID) +} + +func setDefaultMtu(config *Config) { + // do nothing if the config does not have the default 0 value. + if config.Mtu != 0 { + return + } + config.Mtu = defaultNetworkMtu +} + +// verifyContainerSettings performs validation of the hostconfig and config +// structures. 
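+//
+// A sketch of the contract (warnings are non-fatal and surfaced to the
+// client, the error aborts the operation):
+//
+//	warnings, err := daemon.verifyContainerSettings(hostConfig, config, false)
+//	if err != nil {
+//		return err // invalid settings
+//	}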
+func (daemon *Daemon) verifyContainerSettings(hostConfig *containertypes.HostConfig, config *containertypes.Config, update bool) ([]string, error) { + + // First perform verification of settings common across all platforms. + if config != nil { + if config.WorkingDir != "" { + config.WorkingDir = filepath.FromSlash(config.WorkingDir) // Ensure in platform semantics + if !system.IsAbs(config.WorkingDir) { + return nil, fmt.Errorf("The working directory '%s' is invalid. It needs to be an absolute path.", config.WorkingDir) + } + } + + if len(config.StopSignal) > 0 { + _, err := signal.ParseSignal(config.StopSignal) + if err != nil { + return nil, err + } + } + } + + if hostConfig == nil { + return nil, nil + } + + logCfg := daemon.getLogConfig(hostConfig.LogConfig) + if err := logger.ValidateLogOpts(logCfg.Type, logCfg.Config); err != nil { + return nil, err + } + + for port := range hostConfig.PortBindings { + _, portStr := nat.SplitProtoPort(string(port)) + if _, err := nat.ParsePort(portStr); err != nil { + return nil, fmt.Errorf("Invalid port specification: %q", portStr) + } + for _, pb := range hostConfig.PortBindings[port] { + _, err := nat.NewPort(nat.SplitProtoPort(pb.HostPort)) + if err != nil { + return nil, fmt.Errorf("Invalid port specification: %q", pb.HostPort) + } + } + } + + // Now do platform-specific verification + return verifyPlatformContainerSettings(daemon, hostConfig, config, update) +} + +// Checks if the client set configurations for more than one network while creating a container +func (daemon *Daemon) verifyNetworkingConfig(nwConfig *networktypes.NetworkingConfig) error { + if nwConfig == nil || len(nwConfig.EndpointsConfig) <= 1 { + return nil + } + l := make([]string, 0, len(nwConfig.EndpointsConfig)) + for k := range nwConfig.EndpointsConfig { + l = append(l, k) + } + err := fmt.Errorf("Container cannot be connected to network endpoints: %s", strings.Join(l, ", ")) + return errors.NewBadRequestError(err) +} + +func configureVolumes(config *Config, rootUID, rootGID int) (*store.VolumeStore, error) { + volumesDriver, err := local.New(config.Root, rootUID, rootGID) + if err != nil { + return nil, err + } + + volumedrivers.Register(volumesDriver, volumesDriver.Name()) + return store.New(config.Root) +} + +// AuthenticateToRegistry checks the validity of credentials in authConfig +func (daemon *Daemon) AuthenticateToRegistry(ctx context.Context, authConfig *types.AuthConfig) (string, string, error) { + return daemon.RegistryService.Auth(authConfig, dockerversion.DockerUserAgent(ctx)) +} + +// SearchRegistryForImages queries the registry for images matching +// term. authConfig is used to login. +func (daemon *Daemon) SearchRegistryForImages(ctx context.Context, term string, + authConfig *types.AuthConfig, + headers map[string][]string) (*registrytypes.SearchResults, error) { + return daemon.RegistryService.Search(term, authConfig, dockerversion.DockerUserAgent(ctx), headers) +} + +// IsShuttingDown tells whether the daemon is shutting down or not +func (daemon *Daemon) IsShuttingDown() bool { + return daemon.shutdown +} + +// GetContainerStats collects all the stats published by a container +func (daemon *Daemon) GetContainerStats(container *container.Container) (*types.StatsJSON, error) { + stats, err := daemon.stats(container) + if err != nil { + return nil, err + } + + return stats, nil +} + +// newBaseContainer creates a new container with its initial +// configuration based on the root storage from the daemon. 
+func (daemon *Daemon) newBaseContainer(id string) *container.Container {
+	return container.NewBaseContainer(id, daemon.containerRoot(id))
+}
+
+// Reload reads configuration changes and modifies the
+// daemon according to those changes.
+// These are the settings that Reload changes:
+// - Daemon labels.
+// - Daemon debug mode.
+func (daemon *Daemon) Reload(config *Config) error {
+	daemon.configStore.reloadLock.Lock()
+	defer daemon.configStore.reloadLock.Unlock()
+	if config.IsValueSet("labels") {
+		daemon.configStore.Labels = config.Labels
+	}
+	if config.IsValueSet("debug") {
+		daemon.configStore.Debug = config.Debug
+	}
+	return nil
+}
+
+func validateID(id string) error {
+	if id == "" {
+		return fmt.Errorf("Invalid empty id")
+	}
+	return nil
+}
+
+func isBridgeNetworkDisabled(config *Config) bool {
+	return config.bridgeConfig.Iface == disableNetworkBridge
+}
+
+func copyBlkioEntry(entries []*containerd.BlkioStatsEntry) []types.BlkioStatEntry {
+	out := make([]types.BlkioStatEntry, len(entries))
+	for i, re := range entries {
+		out[i] = types.BlkioStatEntry{
+			Major: re.Major,
+			Minor: re.Minor,
+			Op:    re.Op,
+			Value: re.Value,
+		}
+	}
+	return out
+}
diff --git a/vendor/github.com/docker/docker/daemon/daemon_experimental.go b/vendor/github.com/docker/docker/daemon/daemon_experimental.go
new file mode 100644
index 00000000..3fd0e765
--- /dev/null
+++ b/vendor/github.com/docker/docker/daemon/daemon_experimental.go
@@ -0,0 +1,9 @@
+// +build experimental
+
+package daemon
+
+import "github.com/docker/engine-api/types/container"
+
+func (daemon *Daemon) verifyExperimentalContainerSettings(hostConfig *container.HostConfig, config *container.Config) ([]string, error) {
+	return nil, nil
+}
diff --git a/vendor/github.com/docker/docker/daemon/daemon_linux.go b/vendor/github.com/docker/docker/daemon/daemon_linux.go
new file mode 100644
index 00000000..9bdf6e2b
--- /dev/null
+++ b/vendor/github.com/docker/docker/daemon/daemon_linux.go
@@ -0,0 +1,80 @@
+package daemon
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"os"
+	"regexp"
+	"strings"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/pkg/mount"
+)
+
+func (daemon *Daemon) cleanupMountsByID(id string) error {
+	logrus.Debugf("Cleaning up old mountid %s: start.", id)
+	f, err := os.Open("/proc/self/mountinfo")
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+
+	return daemon.cleanupMountsFromReaderByID(f, id, mount.Unmount)
+}
+
+func (daemon *Daemon) cleanupMountsFromReaderByID(reader io.Reader, id string, unmount func(target string) error) error {
+	if daemon.root == "" {
+		return nil
+	}
+	var errors []string
+
+	regexps := getCleanPatterns(id)
+	sc := bufio.NewScanner(reader)
+	for sc.Scan() {
+		if fields := strings.Fields(sc.Text()); len(fields) > 4 {
+			if mnt := fields[4]; strings.HasPrefix(mnt, daemon.root) {
+				for _, p := range regexps {
+					if p.MatchString(mnt) {
+						if err := unmount(mnt); err != nil {
+							logrus.Error(err)
+							errors = append(errors, err.Error())
+						}
+					}
+				}
+			}
+		}
+	}
+
+	if err := sc.Err(); err != nil {
+		return err
+	}
+
+	if len(errors) > 0 {
+		return fmt.Errorf("Error cleaning up mounts:\n%v", strings.Join(errors, "\n"))
+	}
+
+	logrus.Debugf("Cleaning up old mountid %v: done.", id)
+	return nil
+}
+
+// cleanupMounts unmounts shm/mqueue mounts for old containers
+func (daemon *Daemon) cleanupMounts() error {
+	return daemon.cleanupMountsByID("")
+}
+
+func getCleanPatterns(id string) (regexps []*regexp.Regexp) {
+	var patterns []string
+	if id == "" {
+		id = "[0-9a-f]{64}"
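+		// with no specific container id, match any 64-hex-character ID and
+		// also clean up leftover per-container shm mounts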
+		patterns = append(patterns, "containers/"+id+"/shm")
+	}
+	patterns = append(patterns, "aufs/mnt/"+id+"$", "overlay/"+id+"/merged$", "zfs/graph/"+id+"$")
+	for _, p := range patterns {
+		r, err := regexp.Compile(p)
+		if err == nil {
+			regexps = append(regexps, r)
+		}
+	}
+	return
+}
diff --git a/vendor/github.com/docker/docker/daemon/daemon_stub.go b/vendor/github.com/docker/docker/daemon/daemon_stub.go
new file mode 100644
index 00000000..40e8ddc8
--- /dev/null
+++ b/vendor/github.com/docker/docker/daemon/daemon_stub.go
@@ -0,0 +1,9 @@
+// +build !experimental
+
+package daemon
+
+import "github.com/docker/engine-api/types/container"
+
+func (daemon *Daemon) verifyExperimentalContainerSettings(hostConfig *container.HostConfig, config *container.Config) ([]string, error) {
+	return nil, nil
+}
diff --git a/vendor/github.com/docker/docker/daemon/daemon_unix.go b/vendor/github.com/docker/docker/daemon/daemon_unix.go
new file mode 100644
index 00000000..1e2e5ceb
--- /dev/null
+++ b/vendor/github.com/docker/docker/daemon/daemon_unix.go
@@ -0,0 +1,955 @@
+// +build linux freebsd
+
+package daemon
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"runtime"
+	"runtime/debug"
+	"strconv"
+	"strings"
+	"syscall"
+	"time"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/container"
+	"github.com/docker/docker/image"
+	"github.com/docker/docker/layer"
+	"github.com/docker/docker/pkg/idtools"
+	"github.com/docker/docker/pkg/parsers"
+	"github.com/docker/docker/pkg/parsers/kernel"
+	"github.com/docker/docker/pkg/sysinfo"
+	"github.com/docker/docker/reference"
+	"github.com/docker/docker/runconfig"
+	runconfigopts "github.com/docker/docker/runconfig/opts"
+	"github.com/docker/engine-api/types"
+	pblkiodev "github.com/docker/engine-api/types/blkiodev"
+	containertypes "github.com/docker/engine-api/types/container"
+	"github.com/opencontainers/runc/libcontainer/label"
+	"github.com/opencontainers/runc/libcontainer/user"
+	"github.com/opencontainers/specs/specs-go"
+)
+
+const (
+	// See https://git.kernel.org/cgit/linux/kernel/git/tip/tip.git/tree/kernel/sched/sched.h?id=8cd9234c64c584432f6992fe944ca9e46ca8ea76#n269
+	linuxMinCPUShares = 2
+	linuxMaxCPUShares = 262144
+	platformSupported = true
+	// It's not a kernel limit; we want this 4M limit to provide a reasonably functional container
+	linuxMinMemory = 4194304
+	// constants for remapped root settings
+	defaultIDSpecifier string = "default"
+	defaultRemappedID  string = "dockremap"
+
+	// constants for cgroup drivers
+	cgroupFsDriver      = "cgroupfs"
+	cgroupSystemdDriver = "systemd"
+)
+
+func getMemoryResources(config containertypes.Resources) *specs.Memory {
+	memory := specs.Memory{}
+
+	if config.Memory > 0 {
+		limit := uint64(config.Memory)
+		memory.Limit = &limit
+	}
+
+	if config.MemoryReservation > 0 {
+		reservation := uint64(config.MemoryReservation)
+		memory.Reservation = &reservation
+	}
+
+	if config.MemorySwap != 0 {
+		swap := uint64(config.MemorySwap)
+		memory.Swap = &swap
+	}
+
+	if config.MemorySwappiness != nil {
+		swappiness := uint64(*config.MemorySwappiness)
+		memory.Swappiness = &swappiness
+	}
+
+	if config.KernelMemory != 0 {
+		kernelMemory := uint64(config.KernelMemory)
+		memory.Kernel = &kernelMemory
+	}
+
+	return &memory
+}
+
+func getCPUResources(config containertypes.Resources) *specs.CPU {
+	cpu := specs.CPU{}
+
+	if config.CPUShares != 0 {
+		shares := uint64(config.CPUShares)
+		cpu.Shares = &shares
+	}
+
+	if config.CpusetCpus != "" {
+		cpuset := config.CpusetCpus
+		cpu.Cpus = &cpuset
+	}
+
+	if config.CpusetMems != "" {
+		cpuset := config.CpusetMems
+		cpu.Mems = &cpuset
+	}
+
+	if config.CPUPeriod != 0 {
+		period := uint64(config.CPUPeriod)
+		cpu.Period = &period
+	}
+
+	if config.CPUQuota != 0 {
+		quota := uint64(config.CPUQuota)
+		cpu.Quota = &quota
+	}
+
+	return &cpu
+}
+
+func getBlkioWeightDevices(config containertypes.Resources) ([]specs.WeightDevice, error) {
+	var stat syscall.Stat_t
+	var blkioWeightDevices []specs.WeightDevice
+
+	for _, weightDevice := range config.BlkioWeightDevice {
+		if err := syscall.Stat(weightDevice.Path, &stat); err != nil {
+			return nil, err
+		}
+		weight := weightDevice.Weight
+		d := specs.WeightDevice{Weight: &weight}
+		d.Major = int64(stat.Rdev / 256)
+		d.Minor = int64(stat.Rdev % 256)
+		blkioWeightDevices = append(blkioWeightDevices, d)
+	}
+
+	return blkioWeightDevices, nil
+}
+
+func parseSecurityOpt(container *container.Container, config *containertypes.HostConfig) error {
+	var (
+		labelOpts []string
+		err       error
+	)
+
+	for _, opt := range config.SecurityOpt {
+		if opt == "no-new-privileges" {
+			container.NoNewPrivileges = true
+		} else {
+			var con []string
+			if strings.Contains(opt, "=") {
+				con = strings.SplitN(opt, "=", 2)
+			} else if strings.Contains(opt, ":") {
+				con = strings.SplitN(opt, ":", 2)
+				logrus.Warnf("Security options with `:` as a separator are deprecated and will be completely unsupported in 1.13, use `=` instead.")
+			}
+
+			if len(con) != 2 {
+				return fmt.Errorf("Invalid --security-opt 1: %q", opt)
+			}
+
+			switch con[0] {
+			case "label":
+				labelOpts = append(labelOpts, con[1])
+			case "apparmor":
+				container.AppArmorProfile = con[1]
+			case "seccomp":
+				container.SeccompProfile = con[1]
+			default:
+				return fmt.Errorf("Invalid --security-opt 2: %q", opt)
+			}
+		}
+	}
+
+	container.ProcessLabel, container.MountLabel, err = label.InitLabels(labelOpts)
+	return err
+}
+
+func getBlkioReadIOpsDevices(config containertypes.Resources) ([]specs.ThrottleDevice, error) {
+	var blkioReadIOpsDevice []specs.ThrottleDevice
+	var stat syscall.Stat_t
+
+	for _, iopsDevice := range config.BlkioDeviceReadIOps {
+		if err := syscall.Stat(iopsDevice.Path, &stat); err != nil {
+			return nil, err
+		}
+		rate := iopsDevice.Rate
+		d := specs.ThrottleDevice{Rate: &rate}
+		d.Major = int64(stat.Rdev / 256)
+		d.Minor = int64(stat.Rdev % 256)
+		blkioReadIOpsDevice = append(blkioReadIOpsDevice, d)
+	}
+
+	return blkioReadIOpsDevice, nil
+}
+
+func getBlkioWriteIOpsDevices(config containertypes.Resources) ([]specs.ThrottleDevice, error) {
+	var blkioWriteIOpsDevice []specs.ThrottleDevice
+	var stat syscall.Stat_t
+
+	for _, iopsDevice := range config.BlkioDeviceWriteIOps {
+		if err := syscall.Stat(iopsDevice.Path, &stat); err != nil {
+			return nil, err
+		}
+		rate := iopsDevice.Rate
+		d := specs.ThrottleDevice{Rate: &rate}
+		d.Major = int64(stat.Rdev / 256)
+		d.Minor = int64(stat.Rdev % 256)
+		blkioWriteIOpsDevice = append(blkioWriteIOpsDevice, d)
+	}
+
+	return blkioWriteIOpsDevice, nil
+}
+
+func getBlkioReadBpsDevices(config containertypes.Resources) ([]specs.ThrottleDevice, error) {
+	var blkioReadBpsDevice []specs.ThrottleDevice
+	var stat syscall.Stat_t
+
+	for _, bpsDevice := range config.BlkioDeviceReadBps {
+		if err := syscall.Stat(bpsDevice.Path, &stat); err != nil {
+			return nil, err
+		}
+		rate := bpsDevice.Rate
+		d := specs.ThrottleDevice{Rate: &rate}
+		d.Major = int64(stat.Rdev / 256)
+		d.Minor = int64(stat.Rdev % 256)
+		blkioReadBpsDevice = append(blkioReadBpsDevice, d)
+	}
+
+	return blkioReadBpsDevice, nil
+}
+
+func
getBlkioWriteBpsDevices(config containertypes.Resources) ([]specs.ThrottleDevice, error) { + var blkioWriteBpsDevice []specs.ThrottleDevice + var stat syscall.Stat_t + + for _, bpsDevice := range config.BlkioDeviceWriteBps { + if err := syscall.Stat(bpsDevice.Path, &stat); err != nil { + return nil, err + } + rate := bpsDevice.Rate + d := specs.ThrottleDevice{Rate: &rate} + d.Major = int64(stat.Rdev / 256) + d.Minor = int64(stat.Rdev % 256) + blkioWriteBpsDevice = append(blkioWriteBpsDevice, d) + } + + return blkioWriteBpsDevice, nil +} + +func checkKernelVersion(k, major, minor int) bool { + if v, err := kernel.GetKernelVersion(); err != nil { + logrus.Warnf("%s", err) + } else { + if kernel.CompareKernelVersion(*v, kernel.VersionInfo{Kernel: k, Major: major, Minor: minor}) < 0 { + return false + } + } + return true +} + +func checkKernel() error { + // Check for unsupported kernel versions + // FIXME: it would be cleaner to not test for specific versions, but rather + // test for specific functionalities. + // Unfortunately we can't test for the feature "does not cause a kernel panic" + // without actually causing a kernel panic, so we need this workaround until + // the circumstances of pre-3.10 crashes are clearer. + // For details see https://github.com/docker/docker/issues/407 + if !checkKernelVersion(3, 10, 0) { + v, _ := kernel.GetKernelVersion() + if os.Getenv("DOCKER_NOWARN_KERNEL_VERSION") == "" { + logrus.Warnf("Your Linux kernel version %s can be unstable running docker. Please upgrade your kernel to 3.10.0.", v.String()) + } + } + return nil +} + +// adaptContainerSettings is called during container creation to modify any +// settings necessary in the HostConfig structure. +func (daemon *Daemon) adaptContainerSettings(hostConfig *containertypes.HostConfig, adjustCPUShares bool) error { + if adjustCPUShares && hostConfig.CPUShares > 0 { + // Handle unsupported CPUShares + if hostConfig.CPUShares < linuxMinCPUShares { + logrus.Warnf("Changing requested CPUShares of %d to minimum allowed of %d", hostConfig.CPUShares, linuxMinCPUShares) + hostConfig.CPUShares = linuxMinCPUShares + } else if hostConfig.CPUShares > linuxMaxCPUShares { + logrus.Warnf("Changing requested CPUShares of %d to maximum allowed of %d", hostConfig.CPUShares, linuxMaxCPUShares) + hostConfig.CPUShares = linuxMaxCPUShares + } + } + if hostConfig.Memory > 0 && hostConfig.MemorySwap == 0 { + // By default, MemorySwap is set to twice the size of Memory. 
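+		// e.g. --memory=512m with no explicit --memory-swap yields
+		// MemorySwap = 1024m (512m of memory plus 512m of swap)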
+ hostConfig.MemorySwap = hostConfig.Memory * 2 + } + if hostConfig.ShmSize == 0 { + hostConfig.ShmSize = container.DefaultSHMSize + } + var err error + if hostConfig.SecurityOpt == nil { + hostConfig.SecurityOpt, err = daemon.generateSecurityOpt(hostConfig.IpcMode, hostConfig.PidMode) + if err != nil { + return err + } + } + if hostConfig.MemorySwappiness == nil { + defaultSwappiness := int64(-1) + hostConfig.MemorySwappiness = &defaultSwappiness + } + if hostConfig.OomKillDisable == nil { + defaultOomKillDisable := false + hostConfig.OomKillDisable = &defaultOomKillDisable + } + + return nil +} + +func verifyContainerResources(resources *containertypes.Resources, sysInfo *sysinfo.SysInfo, update bool) ([]string, error) { + warnings := []string{} + + // memory subsystem checks and adjustments + if resources.Memory != 0 && resources.Memory < linuxMinMemory { + return warnings, fmt.Errorf("Minimum memory limit allowed is 4MB") + } + if resources.Memory > 0 && !sysInfo.MemoryLimit { + warnings = append(warnings, "Your kernel does not support memory limit capabilities. Limitation discarded.") + logrus.Warnf("Your kernel does not support memory limit capabilities. Limitation discarded.") + resources.Memory = 0 + resources.MemorySwap = -1 + } + if resources.Memory > 0 && resources.MemorySwap != -1 && !sysInfo.SwapLimit { + warnings = append(warnings, "Your kernel does not support swap limit capabilities, memory limited without swap.") + logrus.Warnf("Your kernel does not support swap limit capabilities, memory limited without swap.") + resources.MemorySwap = -1 + } + if resources.Memory > 0 && resources.MemorySwap > 0 && resources.MemorySwap < resources.Memory { + return warnings, fmt.Errorf("Minimum memoryswap limit should be larger than memory limit, see usage.") + } + if resources.Memory == 0 && resources.MemorySwap > 0 && !update { + return warnings, fmt.Errorf("You should always set the Memory limit when using Memoryswap limit, see usage.") + } + if resources.MemorySwappiness != nil && *resources.MemorySwappiness != -1 && !sysInfo.MemorySwappiness { + warnings = append(warnings, "Your kernel does not support memory swappiness capabilities, memory swappiness discarded.") + logrus.Warnf("Your kernel does not support memory swappiness capabilities, memory swappiness discarded.") + resources.MemorySwappiness = nil + } + if resources.MemorySwappiness != nil { + swappiness := *resources.MemorySwappiness + if swappiness < -1 || swappiness > 100 { + return warnings, fmt.Errorf("Invalid value: %v, valid memory swappiness range is 0-100.", swappiness) + } + } + if resources.MemoryReservation > 0 && !sysInfo.MemoryReservation { + warnings = append(warnings, "Your kernel does not support memory soft limit capabilities. Limitation discarded.") + logrus.Warnf("Your kernel does not support memory soft limit capabilities. Limitation discarded.") + resources.MemoryReservation = 0 + } + if resources.Memory > 0 && resources.MemoryReservation > 0 && resources.Memory < resources.MemoryReservation { + return warnings, fmt.Errorf("Minimum memory limit should be larger than memory reservation limit, see usage.") + } + if resources.KernelMemory > 0 && !sysInfo.KernelMemory { + warnings = append(warnings, "Your kernel does not support kernel memory limit capabilities. Limitation discarded.") + logrus.Warnf("Your kernel does not support kernel memory limit capabilities. 
Limitation discarded.") + resources.KernelMemory = 0 + } + if resources.KernelMemory > 0 && resources.KernelMemory < linuxMinMemory { + return warnings, fmt.Errorf("Minimum kernel memory limit allowed is 4MB") + } + if resources.KernelMemory > 0 && !checkKernelVersion(4, 0, 0) { + warnings = append(warnings, "You specified a kernel memory limit on a kernel older than 4.0. Kernel memory limits are experimental on older kernels, it won't work as expected and can cause your system to be unstable.") + logrus.Warnf("You specified a kernel memory limit on a kernel older than 4.0. Kernel memory limits are experimental on older kernels, it won't work as expected and can cause your system to be unstable.") + } + if resources.OomKillDisable != nil && !sysInfo.OomKillDisable { + // only produce warnings if the setting wasn't to *disable* the OOM Kill; no point + // warning the caller if they already wanted the feature to be off + if *resources.OomKillDisable { + warnings = append(warnings, "Your kernel does not support OomKillDisable, OomKillDisable discarded.") + logrus.Warnf("Your kernel does not support OomKillDisable, OomKillDisable discarded.") + } + resources.OomKillDisable = nil + } + + if resources.PidsLimit != 0 && !sysInfo.PidsLimit { + warnings = append(warnings, "Your kernel does not support pids limit capabilities, pids limit discarded.") + logrus.Warnf("Your kernel does not support pids limit capabilities, pids limit discarded.") + resources.PidsLimit = 0 + } + + // cpu subsystem checks and adjustments + if resources.CPUShares > 0 && !sysInfo.CPUShares { + warnings = append(warnings, "Your kernel does not support CPU shares. Shares discarded.") + logrus.Warnf("Your kernel does not support CPU shares. Shares discarded.") + resources.CPUShares = 0 + } + if resources.CPUPeriod > 0 && !sysInfo.CPUCfsPeriod { + warnings = append(warnings, "Your kernel does not support CPU cfs period. Period discarded.") + logrus.Warnf("Your kernel does not support CPU cfs period. Period discarded.") + resources.CPUPeriod = 0 + } + if resources.CPUQuota > 0 && !sysInfo.CPUCfsQuota { + warnings = append(warnings, "Your kernel does not support CPU cfs quota. Quota discarded.") + logrus.Warnf("Your kernel does not support CPU cfs quota. Quota discarded.") + resources.CPUQuota = 0 + } + + // cpuset subsystem checks and adjustments + if (resources.CpusetCpus != "" || resources.CpusetMems != "") && !sysInfo.Cpuset { + warnings = append(warnings, "Your kernel does not support cpuset. Cpuset discarded.") + logrus.Warnf("Your kernel does not support cpuset. Cpuset discarded.") + resources.CpusetCpus = "" + resources.CpusetMems = "" + } + cpusAvailable, err := sysInfo.IsCpusetCpusAvailable(resources.CpusetCpus) + if err != nil { + return warnings, fmt.Errorf("Invalid value %s for cpuset cpus.", resources.CpusetCpus) + } + if !cpusAvailable { + return warnings, fmt.Errorf("Requested CPUs are not available - requested %s, available: %s.", resources.CpusetCpus, sysInfo.Cpus) + } + memsAvailable, err := sysInfo.IsCpusetMemsAvailable(resources.CpusetMems) + if err != nil { + return warnings, fmt.Errorf("Invalid value %s for cpuset mems.", resources.CpusetMems) + } + if !memsAvailable { + return warnings, fmt.Errorf("Requested memory nodes are not available - requested %s, available: %s.", resources.CpusetMems, sysInfo.Mems) + } + + // blkio subsystem checks and adjustments + if resources.BlkioWeight > 0 && !sysInfo.BlkioWeight { + warnings = append(warnings, "Your kernel does not support Block I/O weight. 
Weight discarded.") + logrus.Warnf("Your kernel does not support Block I/O weight. Weight discarded.") + resources.BlkioWeight = 0 + } + if resources.BlkioWeight > 0 && (resources.BlkioWeight < 10 || resources.BlkioWeight > 1000) { + return warnings, fmt.Errorf("Range of blkio weight is from 10 to 1000.") + } + if len(resources.BlkioWeightDevice) > 0 && !sysInfo.BlkioWeightDevice { + warnings = append(warnings, "Your kernel does not support Block I/O weight_device.") + logrus.Warnf("Your kernel does not support Block I/O weight_device. Weight-device discarded.") + resources.BlkioWeightDevice = []*pblkiodev.WeightDevice{} + } + if len(resources.BlkioDeviceReadBps) > 0 && !sysInfo.BlkioReadBpsDevice { + warnings = append(warnings, "Your kernel does not support Block read limit in bytes per second.") + logrus.Warnf("Your kernel does not support Block I/O read limit in bytes per second. --device-read-bps discarded.") + resources.BlkioDeviceReadBps = []*pblkiodev.ThrottleDevice{} + } + if len(resources.BlkioDeviceWriteBps) > 0 && !sysInfo.BlkioWriteBpsDevice { + warnings = append(warnings, "Your kernel does not support Block write limit in bytes per second.") + logrus.Warnf("Your kernel does not support Block I/O write limit in bytes per second. --device-write-bps discarded.") + resources.BlkioDeviceWriteBps = []*pblkiodev.ThrottleDevice{} + } + if len(resources.BlkioDeviceReadIOps) > 0 && !sysInfo.BlkioReadIOpsDevice { + warnings = append(warnings, "Your kernel does not support Block read limit in IO per second.") + logrus.Warnf("Your kernel does not support Block I/O read limit in IO per second. -device-read-iops discarded.") + resources.BlkioDeviceReadIOps = []*pblkiodev.ThrottleDevice{} + } + if len(resources.BlkioDeviceWriteIOps) > 0 && !sysInfo.BlkioWriteIOpsDevice { + warnings = append(warnings, "Your kernel does not support Block write limit in IO per second.") + logrus.Warnf("Your kernel does not support Block I/O write limit in IO per second. --device-write-iops discarded.") + resources.BlkioDeviceWriteIOps = []*pblkiodev.ThrottleDevice{} + } + + return warnings, nil +} + +func (daemon *Daemon) getCgroupDriver() string { + cgroupDriver := cgroupFsDriver + + if UsingSystemd(daemon.configStore) { + cgroupDriver = cgroupSystemdDriver + } + return cgroupDriver +} + +// getCD gets the raw value of the native.cgroupdriver option, if set. +func getCD(config *Config) string { + for _, option := range config.ExecOptions { + key, val, err := parsers.ParseKeyValueOpt(option) + if err != nil || !strings.EqualFold(key, "native.cgroupdriver") { + continue + } + return val + } + return "" +} + +// VerifyCgroupDriver validates native.cgroupdriver +func VerifyCgroupDriver(config *Config) error { + cd := getCD(config) + if cd == "" || cd == cgroupFsDriver || cd == cgroupSystemdDriver { + return nil + } + return fmt.Errorf("native.cgroupdriver option %s not supported", cd) +} + +// UsingSystemd returns true if cli option includes native.cgroupdriver=systemd +func UsingSystemd(config *Config) bool { + return getCD(config) == cgroupSystemdDriver +} + +// verifyPlatformContainerSettings performs platform-specific validation of the +// hostconfig and config structures. 
+func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *containertypes.HostConfig, config *containertypes.Config, update bool) ([]string, error) { + warnings := []string{} + sysInfo := sysinfo.New(true) + + warnings, err := daemon.verifyExperimentalContainerSettings(hostConfig, config) + if err != nil { + return warnings, err + } + + w, err := verifyContainerResources(&hostConfig.Resources, sysInfo, update) + if err != nil { + return warnings, err + } + warnings = append(warnings, w...) + + if hostConfig.ShmSize < 0 { + return warnings, fmt.Errorf("SHM size must be greater than 0") + } + + if hostConfig.OomScoreAdj < -1000 || hostConfig.OomScoreAdj > 1000 { + return warnings, fmt.Errorf("Invalid value %d, range for oom score adj is [-1000, 1000].", hostConfig.OomScoreAdj) + } + if sysInfo.IPv4ForwardingDisabled { + warnings = append(warnings, "IPv4 forwarding is disabled. Networking will not work.") + logrus.Warnf("IPv4 forwarding is disabled. Networking will not work") + } + // check for various conflicting options with user namespaces + if daemon.configStore.RemappedRoot != "" && hostConfig.UsernsMode.IsPrivate() { + if hostConfig.Privileged { + return warnings, fmt.Errorf("Privileged mode is incompatible with user namespaces") + } + if hostConfig.NetworkMode.IsHost() { + return warnings, fmt.Errorf("Cannot share the host's network namespace when user namespaces are enabled") + } + if hostConfig.PidMode.IsHost() { + return warnings, fmt.Errorf("Cannot share the host PID namespace when user namespaces are enabled") + } + if hostConfig.ReadonlyRootfs { + return warnings, fmt.Errorf("Cannot use the --read-only option when user namespaces are enabled") + } + } + if hostConfig.CgroupParent != "" && UsingSystemd(daemon.configStore) { + // CgroupParent for systemd cgroup should be named as "xxx.slice" + if len(hostConfig.CgroupParent) <= 6 || !strings.HasSuffix(hostConfig.CgroupParent, ".slice") { + return warnings, fmt.Errorf("cgroup-parent for systemd cgroup should be a valid slice named as \"xxx.slice\"") + } + } + return warnings, nil +} + +// verifyDaemonSettings performs validation of daemon config struct +func verifyDaemonSettings(config *Config) error { + // Check for mutually incompatible config options + if config.bridgeConfig.Iface != "" && config.bridgeConfig.IP != "" { + return fmt.Errorf("You specified -b & --bip, mutually exclusive options. Please specify only one") + } + if !config.bridgeConfig.EnableIPTables && !config.bridgeConfig.InterContainerCommunication { + return fmt.Errorf("You specified --iptables=false with --icc=false. ICC=false uses iptables to function. 
Please set --icc or --iptables to true") + } + if !config.bridgeConfig.EnableIPTables && config.bridgeConfig.EnableIPMasq { + config.bridgeConfig.EnableIPMasq = false + } + if err := VerifyCgroupDriver(config); err != nil { + return err + } + if config.CgroupParent != "" && UsingSystemd(config) { + if len(config.CgroupParent) <= 6 || !strings.HasSuffix(config.CgroupParent, ".slice") { + return fmt.Errorf("cgroup-parent for systemd cgroup should be a valid slice named as \"xxx.slice\"") + } + } + return nil +} + +// checkSystem validates platform-specific requirements +func checkSystem() error { + if os.Geteuid() != 0 { + return fmt.Errorf("The Docker daemon needs to be run as root") + } + return checkKernel() +} + +// configureMaxThreads sets the Go runtime max threads threshold +// which is 90% of the kernel setting from /proc/sys/kernel/threads-max +func configureMaxThreads(config *Config) error { + mt, err := ioutil.ReadFile("/proc/sys/kernel/threads-max") + if err != nil { + return err + } + mtint, err := strconv.Atoi(strings.TrimSpace(string(mt))) + if err != nil { + return err + } + maxThreads := (mtint / 100) * 90 + debug.SetMaxThreads(maxThreads) + logrus.Debugf("Golang's threads limit set to %d", maxThreads) + return nil +} + +// configureKernelSecuritySupport configures and validates security support for the kernel +func configureKernelSecuritySupport(config *Config, driverName string) error { + if config.EnableSelinuxSupport { + if selinuxEnabled() { + // As Docker on overlayFS and SELinux are incompatible at present, error on overlayfs being enabled + if driverName == "overlay" { + return fmt.Errorf("SELinux is not supported with the %s graph driver", driverName) + } + logrus.Debug("SELinux enabled successfully") + } else { + logrus.Warn("Docker could not enable SELinux on the host system") + } + } else { + selinuxSetDisabled() + } + return nil +} + +// setupInitLayer populates a directory with mountpoints suitable +// for bind-mounting things into the container. +// +// This extra layer is used by all containers as the top-most ro layer. It protects +// the container from unwanted side-effects on the rw layer. +func setupInitLayer(initLayer string, rootUID, rootGID int) error { + for pth, typ := range map[string]string{ + "/dev/pts": "dir", + "/dev/shm": "dir", + "/proc": "dir", + "/sys": "dir", + "/.dockerenv": "file", + "/etc/resolv.conf": "file", + "/etc/hosts": "file", + "/etc/hostname": "file", + "/dev/console": "file", + "/etc/mtab": "/proc/mounts", + } { + parts := strings.Split(pth, "/") + prev := "/" + for _, p := range parts[1:] { + prev = filepath.Join(prev, p) + syscall.Unlink(filepath.Join(initLayer, prev)) + } + + if _, err := os.Stat(filepath.Join(initLayer, pth)); err != nil { + if os.IsNotExist(err) { + if err := idtools.MkdirAllNewAs(filepath.Join(initLayer, filepath.Dir(pth)), 0755, rootUID, rootGID); err != nil { + return err + } + switch typ { + case "dir": + if err := idtools.MkdirAllNewAs(filepath.Join(initLayer, pth), 0755, rootUID, rootGID); err != nil { + return err + } + case "file": + f, err := os.OpenFile(filepath.Join(initLayer, pth), os.O_CREATE, 0755) + if err != nil { + return err + } + f.Chown(rootUID, rootGID) + f.Close() + default: + if err := os.Symlink(typ, filepath.Join(initLayer, pth)); err != nil { + return err + } + } + } else { + return err + } + } + } + + // Layer is ready to use, if it wasn't before. 
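+ // Resulting layout sketch, relative to initLayer (assuming an initially
+ // empty directory):
+ //   dev/pts/ dev/shm/ proc/ sys/                                -- empty dirs
+ //   .dockerenv etc/resolv.conf etc/hosts etc/hostname dev/console -- empty files
+ //   etc/mtab -> /proc/mounts                                    -- symlink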
+ return nil +} + +// Parse the remapped root (user namespace) option, which can be one of: +// username - valid username from /etc/passwd +// username:groupname - valid username; valid groupname from /etc/group +// uid - 32-bit unsigned int valid Linux UID value +// uid:gid - uid value; 32-bit unsigned int Linux GID value +// +// If no groupname is specified, and a username is specified, an attempt +// will be made to lookup a gid for that username as a groupname +// +// If names are used, they are verified to exist in passwd/group +func parseRemappedRoot(usergrp string) (string, string, error) { + + var ( + userID, groupID int + username, groupname string + ) + + idparts := strings.Split(usergrp, ":") + if len(idparts) > 2 { + return "", "", fmt.Errorf("Invalid user/group specification in --userns-remap: %q", usergrp) + } + + if uid, err := strconv.ParseInt(idparts[0], 10, 32); err == nil { + // must be a uid; take it as valid + userID = int(uid) + luser, err := user.LookupUid(userID) + if err != nil { + return "", "", fmt.Errorf("Uid %d has no entry in /etc/passwd: %v", userID, err) + } + username = luser.Name + if len(idparts) == 1 { + // if the uid was numeric and no gid was specified, take the uid as the gid + groupID = userID + lgrp, err := user.LookupGid(groupID) + if err != nil { + return "", "", fmt.Errorf("Gid %d has no entry in /etc/group: %v", groupID, err) + } + groupname = lgrp.Name + } + } else { + lookupName := idparts[0] + // special case: if the user specified "default", they want Docker to create or + // use (after creation) the "dockremap" user/group for root remapping + if lookupName == defaultIDSpecifier { + lookupName = defaultRemappedID + } + luser, err := user.LookupUser(lookupName) + if err != nil && idparts[0] != defaultIDSpecifier { + // error if the name requested isn't the special "dockremap" ID + return "", "", fmt.Errorf("Error during uid lookup for %q: %v", lookupName, err) + } else if err != nil { + // special case: if the username == "default", then we have been asked + // to create a new entry pair in /etc/{passwd,group} for which the /etc/sub{uid,gid} + // ranges will be used for the user and group mappings in user namespaced containers + _, _, err := idtools.AddNamespaceRangesUser(defaultRemappedID) + if err == nil { + return defaultRemappedID, defaultRemappedID, nil + } + return "", "", fmt.Errorf("Error during %q user creation: %v", defaultRemappedID, err) + } + username = luser.Name + if len(idparts) == 1 { + // we only have a string username, and no group specified; look up gid from username as group + group, err := user.LookupGroup(lookupName) + if err != nil { + return "", "", fmt.Errorf("Error during gid lookup for %q: %v", lookupName, err) + } + groupID = group.Gid + groupname = group.Name + } + } + + if len(idparts) == 2 { + // groupname or gid is separately specified and must be resolved + // to an unsigned 32-bit gid + if gid, err := strconv.ParseInt(idparts[1], 10, 32); err == nil { + // must be a gid, take it as valid + groupID = int(gid) + lgrp, err := user.LookupGid(groupID) + if err != nil { + return "", "", fmt.Errorf("Gid %d has no entry in /etc/group: %v", groupID, err) + } + groupname = lgrp.Name + } else { + // not a number; attempt a lookup + if _, err := user.LookupGroup(idparts[1]); err != nil { + return "", "", fmt.Errorf("Error during groupname lookup for %q: %v", idparts[1], err) + } + groupname = idparts[1] + } + } + return username, groupname, nil +} + +func setupRemappedRoot(config *Config) ([]idtools.IDMap, 
[]idtools.IDMap, error) { + if runtime.GOOS != "linux" && config.RemappedRoot != "" { + return nil, nil, fmt.Errorf("User namespaces are only supported on Linux") + } + + // if the daemon was started with remapped root option, parse + // the config option to the int uid,gid values + var ( + uidMaps, gidMaps []idtools.IDMap + ) + if config.RemappedRoot != "" { + username, groupname, err := parseRemappedRoot(config.RemappedRoot) + if err != nil { + return nil, nil, err + } + if username == "root" { + // Cannot setup user namespaces with a 1-to-1 mapping; "--root=0:0" is a no-op + // effectively + logrus.Warnf("User namespaces: root cannot be remapped with itself; user namespaces are OFF") + return uidMaps, gidMaps, nil + } + logrus.Infof("User namespaces: ID ranges will be mapped to subuid/subgid ranges of: %s:%s", username, groupname) + // update remapped root setting now that we have resolved them to actual names + config.RemappedRoot = fmt.Sprintf("%s:%s", username, groupname) + + uidMaps, gidMaps, err = idtools.CreateIDMappings(username, groupname) + if err != nil { + return nil, nil, fmt.Errorf("Can't create ID mappings: %v", err) + } + } + return uidMaps, gidMaps, nil +} + +func setupDaemonRoot(config *Config, rootDir string, rootUID, rootGID int) error { + config.Root = rootDir + // the docker root metadata directory needs to have execute permissions for all users (g+x,o+x) + // so that syscalls executing as non-root, operating on subdirectories of the graph root + // (e.g. mounted layers of a container) can traverse this path. + // The user namespace support will create subdirectories for the remapped root host uid:gid + // pair owned by that same uid:gid pair for proper write access to those needed metadata and + // layer content subtrees. + if _, err := os.Stat(rootDir); err == nil { + // root currently exists; verify the access bits are correct by setting them + if err = os.Chmod(rootDir, 0711); err != nil { + return err + } + } else if os.IsNotExist(err) { + // no root exists yet, create it 0711 with root:root ownership + if err := os.MkdirAll(rootDir, 0711); err != nil { + return err + } + } + + // if user namespaces are enabled we will create a subtree underneath the specified root + // with any/all specified remapped root uid/gid options on the daemon creating + // a new subdirectory with ownership set to the remapped uid/gid (so as to allow + // `chdir()` to work for containers namespaced to that uid/gid) + if config.RemappedRoot != "" { + config.Root = filepath.Join(rootDir, fmt.Sprintf("%d.%d", rootUID, rootGID)) + logrus.Debugf("Creating user namespaced daemon root: %s", config.Root) + // Create the root directory if it doesn't exist + if err := idtools.MkdirAllAs(config.Root, 0700, rootUID, rootGID); err != nil { + return fmt.Errorf("Cannot create daemon root: %s: %v", config.Root, err) + } + } + return nil +} + +// registerLinks writes the links to a file. 
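+ // Rough sketch of the legacy-link input handled here (names hypothetical):
+ // `docker run --link db:database web` yields a hostConfig.Links entry that
+ // ParseLink splits into a container name ("db") and an alias ("database"),
+ // which registerLink then wires up between the parent and child containers.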
+func (daemon *Daemon) registerLinks(container *container.Container, hostConfig *containertypes.HostConfig) error { + if hostConfig == nil || hostConfig.NetworkMode.IsUserDefined() { + return nil + } + + for _, l := range hostConfig.Links { + name, alias, err := runconfigopts.ParseLink(l) + if err != nil { + return err + } + child, err := daemon.GetContainer(name) + if err != nil { + //An error from daemon.GetContainer() means this name could not be found + return fmt.Errorf("Could not get container for %s", name) + } + for child.HostConfig.NetworkMode.IsContainer() { + parts := strings.SplitN(string(child.HostConfig.NetworkMode), ":", 2) + child, err = daemon.GetContainer(parts[1]) + if err != nil { + return fmt.Errorf("Could not get container for %s", parts[1]) + } + } + if child.HostConfig.NetworkMode.IsHost() { + return runconfig.ErrConflictHostNetworkAndLinks + } + if err := daemon.registerLink(container, child, alias); err != nil { + return err + } + } + + // After we load all the links into the daemon + // set them to nil on the hostconfig + return container.WriteHostConfig() +} + +// conditionalMountOnStart is a platform specific helper function during the +// container start to call mount. +func (daemon *Daemon) conditionalMountOnStart(container *container.Container) error { + return daemon.Mount(container) +} + +// conditionalUnmountOnCleanup is a platform specific helper function called +// during the cleanup of a container to unmount. +func (daemon *Daemon) conditionalUnmountOnCleanup(container *container.Container) error { + return daemon.Unmount(container) +} + +func restoreCustomImage(is image.Store, ls layer.Store, rs reference.Store) error { + // Unix has no custom images to register + return nil +} + +func (daemon *Daemon) stats(c *container.Container) (*types.StatsJSON, error) { + if !c.IsRunning() { + return nil, errNotRunning{c.ID} + } + stats, err := daemon.containerd.Stats(c.ID) + if err != nil { + return nil, err + } + s := &types.StatsJSON{} + cgs := stats.CgroupStats + if cgs != nil { + s.BlkioStats = types.BlkioStats{ + IoServiceBytesRecursive: copyBlkioEntry(cgs.BlkioStats.IoServiceBytesRecursive), + IoServicedRecursive: copyBlkioEntry(cgs.BlkioStats.IoServicedRecursive), + IoQueuedRecursive: copyBlkioEntry(cgs.BlkioStats.IoQueuedRecursive), + IoServiceTimeRecursive: copyBlkioEntry(cgs.BlkioStats.IoServiceTimeRecursive), + IoWaitTimeRecursive: copyBlkioEntry(cgs.BlkioStats.IoWaitTimeRecursive), + IoMergedRecursive: copyBlkioEntry(cgs.BlkioStats.IoMergedRecursive), + IoTimeRecursive: copyBlkioEntry(cgs.BlkioStats.IoTimeRecursive), + SectorsRecursive: copyBlkioEntry(cgs.BlkioStats.SectorsRecursive), + } + cpu := cgs.CpuStats + s.CPUStats = types.CPUStats{ + CPUUsage: types.CPUUsage{ + TotalUsage: cpu.CpuUsage.TotalUsage, + PercpuUsage: cpu.CpuUsage.PercpuUsage, + UsageInKernelmode: cpu.CpuUsage.UsageInKernelmode, + UsageInUsermode: cpu.CpuUsage.UsageInUsermode, + }, + ThrottlingData: types.ThrottlingData{ + Periods: cpu.ThrottlingData.Periods, + ThrottledPeriods: cpu.ThrottlingData.ThrottledPeriods, + ThrottledTime: cpu.ThrottlingData.ThrottledTime, + }, + } + mem := cgs.MemoryStats.Usage + s.MemoryStats = types.MemoryStats{ + Usage: mem.Usage, + MaxUsage: mem.MaxUsage, + Stats: cgs.MemoryStats.Stats, + Failcnt: mem.Failcnt, + Limit: mem.Limit, + } + // if the container does not set memory limit, use the machineMemory + if mem.Limit > daemon.statsCollector.machineMemory && daemon.statsCollector.machineMemory > 0 { + s.MemoryStats.Limit = 
daemon.statsCollector.machineMemory + } + if cgs.PidsStats != nil { + s.PidsStats = types.PidsStats{ + Current: cgs.PidsStats.Current, + } + } + } + s.Read = time.Unix(int64(stats.Timestamp), 0) + return s, nil +} + +// setDefaultIsolation determines the default isolation mode for the +// daemon to run in. This is only applicable on Windows. +func (daemon *Daemon) setDefaultIsolation() error { + return nil +} + +func rootFSToAPIType(rootfs *image.RootFS) types.RootFS { + var layers []string + for _, l := range rootfs.DiffIDs { + layers = append(layers, l.String()) + } + return types.RootFS{ + Type: rootfs.Type, + Layers: layers, + } +} diff --git a/vendor/github.com/docker/docker/daemon/daemon_unsupported.go b/vendor/github.com/docker/docker/daemon/daemon_unsupported.go new file mode 100644 index 00000000..987528f4 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/daemon_unsupported.go @@ -0,0 +1,5 @@ +// +build !linux,!freebsd,!windows + +package daemon + +const platformSupported = false diff --git a/vendor/github.com/docker/docker/daemon/debugtrap_unix.go b/vendor/github.com/docker/docker/daemon/debugtrap_unix.go new file mode 100644 index 00000000..c4a11b07 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/debugtrap_unix.go @@ -0,0 +1,21 @@ +// +build !windows + +package daemon + +import ( + "os" + "os/signal" + "syscall" + + psignal "github.com/docker/docker/pkg/signal" +) + +func setupDumpStackTrap() { + c := make(chan os.Signal, 1) + signal.Notify(c, syscall.SIGUSR1) + go func() { + for range c { + psignal.DumpStacks() + } + }() +} diff --git a/vendor/github.com/docker/docker/daemon/debugtrap_unsupported.go b/vendor/github.com/docker/docker/daemon/debugtrap_unsupported.go new file mode 100644 index 00000000..fef1bd77 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/debugtrap_unsupported.go @@ -0,0 +1,7 @@ +// +build !linux,!darwin,!freebsd,!windows + +package daemon + +func setupDumpStackTrap() { + return +} diff --git a/vendor/github.com/docker/docker/daemon/delete.go b/vendor/github.com/docker/docker/daemon/delete.go new file mode 100644 index 00000000..ec9d5c5f --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/delete.go @@ -0,0 +1,157 @@ +package daemon + +import ( + "fmt" + "os" + "path" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/container" + "github.com/docker/docker/errors" + "github.com/docker/docker/layer" + volumestore "github.com/docker/docker/volume/store" + "github.com/docker/engine-api/types" +) + +// ContainerRm removes the container id from the filesystem. An error +// is returned if the container is not found, or if the remove +// fails. If the remove succeeds, the container name is released, and +// network links are removed. +func (daemon *Daemon) ContainerRm(name string, config *types.ContainerRmConfig) error { + container, err := daemon.GetContainer(name) + if err != nil { + return err + } + + // Container state RemovalInProgress should be used to avoid races. 
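+ // e.g. two concurrent `docker rm -f web` requests (name hypothetical): the
+ // loser of the SetRemovalInProgress race simply returns below instead of
+ // attempting a second, overlapping removal.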
+ if inProgress := container.SetRemovalInProgress(); inProgress { + return nil + } + defer container.ResetRemovalInProgress() + + // check if container wasn't deregistered by previous rm since Get + if c := daemon.containers.Get(container.ID); c == nil { + return nil + } + + if config.RemoveLink { + return daemon.rmLink(container, name) + } + + err = daemon.cleanupContainer(container, config.ForceRemove) + if err == nil || config.ForceRemove { + if e := daemon.removeMountPoints(container, config.RemoveVolume); e != nil { + logrus.Error(e) + } + } + + return err +} + +func (daemon *Daemon) rmLink(container *container.Container, name string) error { + if name[0] != '/' { + name = "/" + name + } + parent, n := path.Split(name) + if parent == "/" { + return fmt.Errorf("Conflict, cannot remove the default name of the container") + } + + parent = strings.TrimSuffix(parent, "/") + pe, err := daemon.nameIndex.Get(parent) + if err != nil { + return fmt.Errorf("Cannot get parent %s for name %s", parent, name) + } + + daemon.releaseName(name) + parentContainer, _ := daemon.GetContainer(pe) + if parentContainer != nil { + daemon.linkIndex.unlink(name, container, parentContainer) + if err := daemon.updateNetwork(parentContainer); err != nil { + logrus.Debugf("Could not update network to remove link %s: %v", n, err) + } + } + return nil +} + +// cleanupContainer unregisters a container from the daemon, stops stats +// collection and cleanly removes contents and metadata from the filesystem. +func (daemon *Daemon) cleanupContainer(container *container.Container, forceRemove bool) (err error) { + if container.IsRunning() { + if !forceRemove { + err := fmt.Errorf("You cannot remove a running container %s. Stop the container before attempting removal or use -f", container.ID) + return errors.NewRequestConflictError(err) + } + if err := daemon.Kill(container); err != nil { + return fmt.Errorf("Could not kill running container %s, cannot remove - %v", container.ID, err) + } + } + + // stop collection of stats for the container regardless + // if stats are currently getting collected. + daemon.statsCollector.stopCollection(container) + + if err = daemon.containerStop(container, 3); err != nil { + return err + } + + // Mark container dead. We don't want anybody to be restarting it. + container.SetDead() + + // Save container state to disk. So that if error happens before + // container meta file got removed from disk, then a restart of + // docker should not make a dead container alive. + if err := container.ToDiskLocking(); err != nil && !os.IsNotExist(err) { + logrus.Errorf("Error saving dying container to disk: %v", err) + } + + // If force removal is required, delete container from various + // indexes even if removal failed. 
+ defer func() { + if err == nil || forceRemove { + daemon.nameIndex.Delete(container.ID) + daemon.linkIndex.delete(container) + selinuxFreeLxcContexts(container.ProcessLabel) + daemon.idIndex.Delete(container.ID) + daemon.containers.Delete(container.ID) + daemon.LogContainerEvent(container, "destroy") + } + }() + + if err = os.RemoveAll(container.Root); err != nil { + return fmt.Errorf("Unable to remove filesystem for %v: %v", container.ID, err) + } + + // When container creation fails and `RWLayer` has not been created yet, we + // do not call `ReleaseRWLayer` + if container.RWLayer != nil { + metadata, err := daemon.layerStore.ReleaseRWLayer(container.RWLayer) + layer.LogReleaseMetadata(metadata) + if err != nil && err != layer.ErrMountDoesNotExist { + return fmt.Errorf("Driver %s failed to remove root filesystem %s: %s", daemon.GraphDriverName(), container.ID, err) + } + } + + return nil +} + +// VolumeRm removes the volume with the given name. +// If the volume is referenced by a container it is not removed +// This is called directly from the remote API +func (daemon *Daemon) VolumeRm(name string) error { + v, err := daemon.volumes.Get(name) + if err != nil { + return err + } + + if err := daemon.volumes.Remove(v); err != nil { + if volumestore.IsInUse(err) { + err := fmt.Errorf("Unable to remove volume, volume still in use: %v", err) + return errors.NewRequestConflictError(err) + } + return fmt.Errorf("Error while removing volume %s: %v", name, err) + } + daemon.LogVolumeEvent(v.Name(), "destroy", map[string]string{"driver": v.DriverName()}) + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/errors.go b/vendor/github.com/docker/docker/daemon/errors.go new file mode 100644 index 00000000..131c9a1e --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/errors.go @@ -0,0 +1,57 @@ +package daemon + +import ( + "fmt" + "strings" + + "github.com/docker/docker/errors" + "github.com/docker/docker/reference" +) + +func (d *Daemon) imageNotExistToErrcode(err error) error { + if dne, isDNE := err.(ErrImageDoesNotExist); isDNE { + if strings.Contains(dne.RefOrID, "@") { + e := fmt.Errorf("No such image: %s", dne.RefOrID) + return errors.NewRequestNotFoundError(e) + } + tag := reference.DefaultTag + ref, err := reference.ParseNamed(dne.RefOrID) + if err != nil { + e := fmt.Errorf("No such image: %s:%s", dne.RefOrID, tag) + return errors.NewRequestNotFoundError(e) + } + if tagged, isTagged := ref.(reference.NamedTagged); isTagged { + tag = tagged.Tag() + } + e := fmt.Errorf("No such image: %s:%s", ref.Name(), tag) + return errors.NewRequestNotFoundError(e) + } + return err +} + +type errNotRunning struct { + containerID string +} + +func (e errNotRunning) Error() string { + return fmt.Sprintf("Container %s is not running", e.containerID) +} + +func (e errNotRunning) ContainerIsRunning() bool { + return false +} + +func errContainerIsRestarting(containerID string) error { + err := fmt.Errorf("Container %s is restarting, wait until the container is running", containerID) + return errors.NewRequestConflictError(err) +} + +func errExecNotFound(id string) error { + err := fmt.Errorf("No such exec instance '%s' found in daemon", id) + return errors.NewRequestNotFoundError(err) +} + +func errExecPaused(id string) error { + err := fmt.Errorf("Container %s is paused, unpause the container before exec", id) + return errors.NewRequestConflictError(err) +} diff --git a/vendor/github.com/docker/docker/daemon/events.go b/vendor/github.com/docker/docker/daemon/events.go new file mode 100644 
index 00000000..7ffffbb6 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/events.go @@ -0,0 +1,71 @@ +package daemon + +import ( + "strings" + + "github.com/docker/docker/container" + "github.com/docker/engine-api/types/events" +) + +// LogContainerEvent generates an event related to a container with only the default attributes. +func (daemon *Daemon) LogContainerEvent(container *container.Container, action string) { + daemon.LogContainerEventWithAttributes(container, action, map[string]string{}) +} + +// LogContainerEventWithAttributes generates an event related to a container with specific given attributes. +func (daemon *Daemon) LogContainerEventWithAttributes(container *container.Container, action string, attributes map[string]string) { + copyAttributes(attributes, container.Config.Labels) + if container.Config.Image != "" { + attributes["image"] = container.Config.Image + } + attributes["name"] = strings.TrimLeft(container.Name, "/") + + actor := events.Actor{ + ID: container.ID, + Attributes: attributes, + } + daemon.EventsService.Log(action, events.ContainerEventType, actor) +} + +// LogImageEvent generates an event related to an image with only the default attributes. +func (daemon *Daemon) LogImageEvent(imageID, refName, action string) { + daemon.LogImageEventWithAttributes(imageID, refName, action, map[string]string{}) +} + +// LogImageEventWithAttributes generates an event related to an image with specific given attributes. +func (daemon *Daemon) LogImageEventWithAttributes(imageID, refName, action string, attributes map[string]string) { + img, err := daemon.GetImage(imageID) + if err == nil && img.Config != nil { + // image has not been removed yet. + // it could be missing if the event is `delete`. + copyAttributes(attributes, img.Config.Labels) + } + if refName != "" { + attributes["name"] = refName + } + actor := events.Actor{ + ID: imageID, + Attributes: attributes, + } + + daemon.EventsService.Log(action, events.ImageEventType, actor) +} + +// LogVolumeEvent generates an event related to a volume. +func (daemon *Daemon) LogVolumeEvent(volumeID, action string, attributes map[string]string) { + actor := events.Actor{ + ID: volumeID, + Attributes: attributes, + } + daemon.EventsService.Log(action, events.VolumeEventType, actor) +} + +// copyAttributes guarantees that labels are not mutated by event triggers. +func copyAttributes(attributes, labels map[string]string) { + if labels == nil { + return + } + for k, v := range labels { + attributes[k] = v + } +} diff --git a/vendor/github.com/docker/docker/daemon/events/events.go b/vendor/github.com/docker/docker/daemon/events/events.go new file mode 100644 index 00000000..ac1c98cd --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/events/events.go @@ -0,0 +1,142 @@ +package events + +import ( + "sync" + "time" + + "github.com/docker/docker/pkg/pubsub" + eventtypes "github.com/docker/engine-api/types/events" +) + +const ( + eventsLimit = 64 + bufferSize = 1024 +) + +// Events is a pubsub channel for events generated by the engine. 
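+ // Consumer-side sketch (hypothetical caller):
+ //   backlog, ch, cancel := e.Subscribe()
+ //   defer cancel()
+ //   for _, m := range backlog { handle(m) }              // up to eventsLimit stored events
+ //   for v := range ch { handle(v.(eventtypes.Message)) } // live events need a type assertion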
+type Events struct { + mu sync.Mutex + events []eventtypes.Message + pub *pubsub.Publisher +} + +// New returns a new *Events instance +func New() *Events { + return &Events{ + events: make([]eventtypes.Message, 0, eventsLimit), + pub: pubsub.NewPublisher(100*time.Millisecond, bufferSize), + } +} + +// Subscribe adds a new listener to events, returning a slice of the last +// 64 stored events, a channel in which you can expect new events (in the form +// of interface{}, so you need a type assertion), and a function to call +// to stop the stream of events. +func (e *Events) Subscribe() ([]eventtypes.Message, chan interface{}, func()) { + e.mu.Lock() + current := make([]eventtypes.Message, len(e.events)) + copy(current, e.events) + l := e.pub.Subscribe() + e.mu.Unlock() + + cancel := func() { + e.Evict(l) + } + return current, l, cancel +} + +// SubscribeTopic adds a new listener to events, returning a slice of the last +// 64 stored events, and a channel in which you can expect new events (in the form +// of interface{}, so you need a type assertion). +func (e *Events) SubscribeTopic(since, sinceNano int64, ef *Filter) ([]eventtypes.Message, chan interface{}) { + e.mu.Lock() + + var topic func(m interface{}) bool + if ef != nil && ef.filter.Len() > 0 { + topic = func(m interface{}) bool { return ef.Include(m.(eventtypes.Message)) } + } + + buffered := e.loadBufferedEvents(since, sinceNano, topic) + + var ch chan interface{} + if topic != nil { + ch = e.pub.SubscribeTopic(topic) + } else { + // Subscribe to all events if there are no filters + ch = e.pub.Subscribe() + } + + e.mu.Unlock() + return buffered, ch +} + +// Evict evicts listener from pubsub +func (e *Events) Evict(l chan interface{}) { + e.pub.Evict(l) +} + +// Log broadcasts event to listeners. Each listener has 100 milliseconds to +// receive the event or it will be skipped. +func (e *Events) Log(action, eventType string, actor eventtypes.Actor) { + now := time.Now().UTC() + jm := eventtypes.Message{ + Action: action, + Type: eventType, + Actor: actor, + Time: now.Unix(), + TimeNano: now.UnixNano(), + } + + // fill deprecated fields for container and images + switch eventType { + case eventtypes.ContainerEventType: + jm.ID = actor.ID + jm.Status = action + jm.From = actor.Attributes["image"] + case eventtypes.ImageEventType: + jm.ID = actor.ID + jm.Status = action + } + + e.mu.Lock() + if len(e.events) == cap(e.events) { + // discard oldest event + copy(e.events, e.events[1:]) + e.events[len(e.events)-1] = jm + } else { + e.events = append(e.events, jm) + } + e.mu.Unlock() + e.pub.Publish(jm) +} + +// SubscribersCount returns number of event listeners +func (e *Events) SubscribersCount() int { + return e.pub.Len() +} + +// loadBufferedEvents iterates over the cached events in the buffer +// and returns those that were emitted before a specific date. +// The date is split into two values: +// - the `since` argument is a date timestamp without nanoseconds, or -1 to return an empty slice. +// - the `sinceNano` argument is the nanoseconds offset from the timestamp. +// It uses `time.Unix(seconds, nanoseconds)` to generate a valid date with those first two arguments. +// It filters those buffered messages with a topic function if it's not nil, otherwise it adds all messages. 
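+ // For example, a client filter of `--since 1464700000.000000123` (values
+ // hypothetical) arrives as since=1464700000, sinceNano=123;
+ // time.Unix(1464700000, 123).UnixNano() then rebuilds the exact nanosecond
+ // cut-off used below.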
+func (e *Events) loadBufferedEvents(since, sinceNano int64, topic func(interface{}) bool) []eventtypes.Message { + var buffered []eventtypes.Message + if since == -1 { + return buffered + } + + sinceNanoUnix := time.Unix(since, sinceNano).UnixNano() + for i := len(e.events) - 1; i >= 0; i-- { + ev := e.events[i] + if ev.TimeNano < sinceNanoUnix { + break + } + if topic == nil || topic(ev) { + buffered = append([]eventtypes.Message{ev}, buffered...) + } + } + return buffered +} diff --git a/vendor/github.com/docker/docker/daemon/events/filter.go b/vendor/github.com/docker/docker/daemon/events/filter.go new file mode 100644 index 00000000..8936e371 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/events/filter.go @@ -0,0 +1,82 @@ +package events + +import ( + "github.com/docker/docker/reference" + "github.com/docker/engine-api/types/events" + "github.com/docker/engine-api/types/filters" +) + +// Filter can filter out docker events from a stream +type Filter struct { + filter filters.Args +} + +// NewFilter creates a new Filter +func NewFilter(filter filters.Args) *Filter { + return &Filter{filter: filter} +} + +// Include returns true when the event ev is included by the filters +func (ef *Filter) Include(ev events.Message) bool { + return ef.filter.ExactMatch("event", ev.Action) && + ef.filter.ExactMatch("type", ev.Type) && + ef.matchContainer(ev) && + ef.matchVolume(ev) && + ef.matchNetwork(ev) && + ef.matchImage(ev) && + ef.matchLabels(ev.Actor.Attributes) +} + +func (ef *Filter) matchLabels(attributes map[string]string) bool { + if !ef.filter.Include("label") { + return true + } + return ef.filter.MatchKVList("label", attributes) +} + +func (ef *Filter) matchContainer(ev events.Message) bool { + return ef.fuzzyMatchName(ev, events.ContainerEventType) +} + +func (ef *Filter) matchVolume(ev events.Message) bool { + return ef.fuzzyMatchName(ev, events.VolumeEventType) +} + +func (ef *Filter) matchNetwork(ev events.Message) bool { + return ef.fuzzyMatchName(ev, events.NetworkEventType) +} + +func (ef *Filter) fuzzyMatchName(ev events.Message, eventType string) bool { + return ef.filter.FuzzyMatch(eventType, ev.Actor.ID) || + ef.filter.FuzzyMatch(eventType, ev.Actor.Attributes["name"]) +} + +// matchImage matches against both event.Actor.ID (for image events) +// and event.Actor.Attributes["image"] (for container events), so that any container that was created +// from an image will be included in the image events. Also compare both +// against the stripped repo name without any tags. 
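+ // e.g. with --filter image=redis (hypothetical), a container event carrying
+ // Attributes["image"] = "redis:3.2" still matches, because stripTag reduces
+ // "redis:3.2" to its repo name "redis" before the comparison.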
+func (ef *Filter) matchImage(ev events.Message) bool { + id := ev.Actor.ID + nameAttr := "image" + var imageName string + + if ev.Type == events.ImageEventType { + nameAttr = "name" + } + + if n, ok := ev.Actor.Attributes[nameAttr]; ok { + imageName = n + } + return ef.filter.ExactMatch("image", id) || + ef.filter.ExactMatch("image", imageName) || + ef.filter.ExactMatch("image", stripTag(id)) || + ef.filter.ExactMatch("image", stripTag(imageName)) +} + +func stripTag(image string) string { + ref, err := reference.ParseNamed(image) + if err != nil { + return image + } + return ref.Name() +} diff --git a/vendor/github.com/docker/docker/daemon/exec.go b/vendor/github.com/docker/docker/daemon/exec.go new file mode 100644 index 00000000..be06845c --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/exec.go @@ -0,0 +1,246 @@ +package daemon + +import ( + "fmt" + "io" + "strings" + "time" + + "golang.org/x/net/context" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/exec" + "github.com/docker/docker/errors" + "github.com/docker/docker/libcontainerd" + "github.com/docker/docker/pkg/pools" + "github.com/docker/docker/pkg/term" + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/strslice" +) + +func (d *Daemon) registerExecCommand(container *container.Container, config *exec.Config) { + // Storing execs in container in order to kill them gracefully whenever the container is stopped or removed. + container.ExecCommands.Add(config.ID, config) + // Storing execs in daemon for easy access via remote API. + d.execCommands.Add(config.ID, config) +} + +// ExecExists looks up the exec instance and returns a bool if it exists or not. +// It will also return the error produced by `getConfig` +func (d *Daemon) ExecExists(name string) (bool, error) { + if _, err := d.getExecConfig(name); err != nil { + return false, err + } + return true, nil +} + +// getExecConfig looks up the exec instance by name. If the container associated +// with the exec instance is stopped or paused, it will return an error. +func (d *Daemon) getExecConfig(name string) (*exec.Config, error) { + ec := d.execCommands.Get(name) + + // If the exec is found but its container is not in the daemon's list of + // containers then it must have been deleted, in which case instead of + // saying the container isn't running, we should return a 404 so that + // the user sees the same error now that they will after the + // 5 minute clean-up loop is run which erases old/dead execs. 
+ + if ec != nil { + if container := d.containers.Get(ec.ContainerID); container != nil { + if !container.IsRunning() { + return nil, fmt.Errorf("Container %s is not running: %s", container.ID, container.State.String()) + } + if container.IsPaused() { + return nil, errExecPaused(container.ID) + } + if container.IsRestarting() { + return nil, errContainerIsRestarting(container.ID) + } + return ec, nil + } + } + + return nil, errExecNotFound(name) +} + +func (d *Daemon) unregisterExecCommand(container *container.Container, execConfig *exec.Config) { + container.ExecCommands.Delete(execConfig.ID) + d.execCommands.Delete(execConfig.ID) +} + +func (d *Daemon) getActiveContainer(name string) (*container.Container, error) { + container, err := d.GetContainer(name) + if err != nil { + return nil, err + } + + if !container.IsRunning() { + return nil, errNotRunning{container.ID} + } + if container.IsPaused() { + return nil, errExecPaused(name) + } + if container.IsRestarting() { + return nil, errContainerIsRestarting(container.ID) + } + return container, nil +} + +// ContainerExecCreate sets up an exec in a running container. +func (d *Daemon) ContainerExecCreate(config *types.ExecConfig) (string, error) { + container, err := d.getActiveContainer(config.Container) + if err != nil { + return "", err + } + + cmd := strslice.StrSlice(config.Cmd) + entrypoint, args := d.getEntrypointAndArgs(strslice.StrSlice{}, cmd) + + keys := []byte{} + if config.DetachKeys != "" { + keys, err = term.ToBytes(config.DetachKeys) + if err != nil { + logrus.Warnf("Wrong escape keys provided (%s, error: %s) using default : ctrl-p ctrl-q", config.DetachKeys, err.Error()) + } + } + + execConfig := exec.NewConfig() + execConfig.OpenStdin = config.AttachStdin + execConfig.OpenStdout = config.AttachStdout + execConfig.OpenStderr = config.AttachStderr + execConfig.ContainerID = container.ID + execConfig.DetachKeys = keys + execConfig.Entrypoint = entrypoint + execConfig.Args = args + execConfig.Tty = config.Tty + execConfig.Privileged = config.Privileged + execConfig.User = config.User + if len(execConfig.User) == 0 { + execConfig.User = container.Config.User + } + + d.registerExecCommand(container, execConfig) + + d.LogContainerEvent(container, "exec_create: "+execConfig.Entrypoint+" "+strings.Join(execConfig.Args, " ")) + + return execConfig.ID, nil +} + +// ContainerExecStart starts a previously set up exec instance. The +// std streams are set up. 
+func (d *Daemon) ContainerExecStart(name string, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) (err error) { + var ( + cStdin io.ReadCloser + cStdout, cStderr io.Writer + ) + + ec, err := d.getExecConfig(name) + if err != nil { + return errExecNotFound(name) + } + + ec.Lock() + if ec.ExitCode != nil { + ec.Unlock() + err := fmt.Errorf("Error: Exec command %s has already run", ec.ID) + return errors.NewRequestConflictError(err) + } + + if ec.Running { + ec.Unlock() + return fmt.Errorf("Error: Exec command %s is already running", ec.ID) + } + ec.Running = true + defer func() { + if err != nil { + ec.Running = false + exitCode := 126 + ec.ExitCode = &exitCode + } + }() + ec.Unlock() + + c := d.containers.Get(ec.ContainerID) + logrus.Debugf("starting exec command %s in container %s", ec.ID, c.ID) + d.LogContainerEvent(c, "exec_start: "+ec.Entrypoint+" "+strings.Join(ec.Args, " ")) + + if ec.OpenStdin && stdin != nil { + r, w := io.Pipe() + go func() { + defer w.Close() + defer logrus.Debugf("Closing buffered stdin pipe") + pools.Copy(w, stdin) + }() + cStdin = r + } + if ec.OpenStdout { + cStdout = stdout + } + if ec.OpenStderr { + cStderr = stderr + } + + if ec.OpenStdin { + ec.NewInputPipes() + } else { + ec.NewNopInputPipe() + } + + p := libcontainerd.Process{ + Args: append([]string{ec.Entrypoint}, ec.Args...), + Terminal: ec.Tty, + } + + if err := execSetPlatformOpt(c, ec, &p); err != nil { + return err + } + + attachErr := container.AttachStreams(context.Background(), ec.StreamConfig, ec.OpenStdin, true, ec.Tty, cStdin, cStdout, cStderr, ec.DetachKeys) + + if err := d.containerd.AddProcess(c.ID, name, p); err != nil { + return err + } + + err = <-attachErr + if err != nil { + return fmt.Errorf("attach failed with error: %v", err) + } + return nil +} + +// execCommandGC runs a ticker to clean up the daemon references +// of exec configs that are no longer part of the container. +func (d *Daemon) execCommandGC() { + for range time.Tick(5 * time.Minute) { + var ( + cleaned int + liveExecCommands = d.containerExecIds() + ) + for id, config := range d.execCommands.Commands() { + if config.CanRemove { + cleaned++ + d.execCommands.Delete(id) + } else { + if _, exists := liveExecCommands[id]; !exists { + config.CanRemove = true + } + } + } + if cleaned > 0 { + logrus.Debugf("clean %d unused exec commands", cleaned) + } + } +} + +// containerExecIds returns a list of all the current exec ids that are in use +// and running inside a container. +func (d *Daemon) containerExecIds() map[string]struct{} { + ids := map[string]struct{}{} + for _, c := range d.containers.List() { + for _, id := range c.ExecCommands.List() { + ids[id] = struct{}{} + } + } + return ids +} diff --git a/vendor/github.com/docker/docker/daemon/exec/exec.go b/vendor/github.com/docker/docker/daemon/exec/exec.go new file mode 100644 index 00000000..bbeb1c16 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/exec/exec.go @@ -0,0 +1,93 @@ +package exec + +import ( + "sync" + + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/runconfig" +) + +// Config holds the configurations for execs. The Daemon keeps +// track of both running and finished execs so that they can be +// examined both during and after completion. 
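+ // Lifecycle sketch (hypothetical): NewConfig() allocates a fresh ID; Running
+ // flips to true on start; ExitCode is set once on completion, which is how
+ // callers distinguish "never ran" (nil) from "ran and finished" (non-nil).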
+type Config struct { + sync.Mutex + *runconfig.StreamConfig + ID string + Running bool + ExitCode *int + OpenStdin bool + OpenStderr bool + OpenStdout bool + CanRemove bool + ContainerID string + DetachKeys []byte + Entrypoint string + Args []string + Tty bool + Privileged bool + User string +} + +// NewConfig initializes a new exec configuration +func NewConfig() *Config { + return &Config{ + ID: stringid.GenerateNonCryptoID(), + StreamConfig: runconfig.NewStreamConfig(), + } +} + +// Store keeps track of the exec configurations. +type Store struct { + commands map[string]*Config + sync.RWMutex +} + +// NewStore initializes a new exec store. +func NewStore() *Store { + return &Store{commands: make(map[string]*Config)} +} + +// Commands returns the exec configurations in the store. +func (e *Store) Commands() map[string]*Config { + e.RLock() + commands := make(map[string]*Config, len(e.commands)) + for id, config := range e.commands { + commands[id] = config + } + e.RUnlock() + return commands +} + +// Add adds a new exec configuration to the store. +func (e *Store) Add(id string, config *Config) { + e.Lock() + e.commands[id] = config + e.Unlock() +} + +// Get returns an exec configuration by its id. +func (e *Store) Get(id string) *Config { + e.RLock() + res := e.commands[id] + e.RUnlock() + return res +} + +// Delete removes an exec configuration from the store. +func (e *Store) Delete(id string) { + e.Lock() + delete(e.commands, id) + e.Unlock() +} + +// List returns the list of exec ids in the store. +func (e *Store) List() []string { + var IDs []string + e.RLock() + for id := range e.commands { + IDs = append(IDs, id) + } + e.RUnlock() + return IDs +} diff --git a/vendor/github.com/docker/docker/daemon/exec_linux.go b/vendor/github.com/docker/docker/daemon/exec_linux.go new file mode 100644 index 00000000..a2c86b28 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/exec_linux.go @@ -0,0 +1,26 @@ +package daemon + +import ( + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/caps" + "github.com/docker/docker/daemon/exec" + "github.com/docker/docker/libcontainerd" +) + +func execSetPlatformOpt(c *container.Container, ec *exec.Config, p *libcontainerd.Process) error { + if len(ec.User) > 0 { + uid, gid, additionalGids, err := getUser(c, ec.User) + if err != nil { + return err + } + p.User = &libcontainerd.User{ + UID: uid, + GID: gid, + AdditionalGids: additionalGids, + } + } + if ec.Privileged { + p.Capabilities = caps.GetAllCapabilities() + } + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/export.go b/vendor/github.com/docker/docker/daemon/export.go new file mode 100644 index 00000000..80d7dbb2 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/export.go @@ -0,0 +1,55 @@ +package daemon + +import ( + "fmt" + "io" + + "github.com/docker/docker/container" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/ioutils" +) + +// ContainerExport writes the contents of the container to the given +// writer. An error is returned if the container cannot be found. 
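+ // Caller-side sketch (names hypothetical): this is what backs `docker export`,
+ // streaming a container's rootfs snapshot as an uncompressed tar:
+ //   f, _ := os.Create("rootfs.tar")
+ //   defer f.Close()
+ //   err := daemon.ContainerExport("web", f)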
+func (daemon *Daemon) ContainerExport(name string, out io.Writer) error { + container, err := daemon.GetContainer(name) + if err != nil { + return err + } + + data, err := daemon.containerExport(container) + if err != nil { + return fmt.Errorf("Error exporting container %s: %v", name, err) + } + defer data.Close() + + // Stream the entire contents of the container (basically a volatile snapshot) + if _, err := io.Copy(out, data); err != nil { + return fmt.Errorf("Error exporting container %s: %v", name, err) + } + return nil +} + +func (daemon *Daemon) containerExport(container *container.Container) (archive.Archive, error) { + if err := daemon.Mount(container); err != nil { + return nil, err + } + + uidMaps, gidMaps := daemon.GetUIDGIDMaps() + archive, err := archive.TarWithOptions(container.BaseFS, &archive.TarOptions{ + Compression: archive.Uncompressed, + UIDMaps: uidMaps, + GIDMaps: gidMaps, + }) + if err != nil { + daemon.Unmount(container) + return nil, err + } + arch := ioutils.NewReadCloserWrapper(archive, func() error { + err := archive.Close() + daemon.Unmount(container) + return err + }) + daemon.LogContainerEvent(container, "export") + return arch, err +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/counter.go b/vendor/github.com/docker/docker/daemon/graphdriver/counter.go new file mode 100644 index 00000000..572fc9be --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/counter.go @@ -0,0 +1,32 @@ +package graphdriver + +import "sync" + +// RefCounter is a generic counter for use by graphdriver Get/Put calls +type RefCounter struct { + counts map[string]int + mu sync.Mutex +} + +// NewRefCounter returns a new RefCounter +func NewRefCounter() *RefCounter { + return &RefCounter{counts: make(map[string]int)} +} + +// Increment increases the ref count for the given id and returns the current count +func (c *RefCounter) Increment(id string) int { + c.mu.Lock() + c.counts[id]++ + count := c.counts[id] + c.mu.Unlock() + return count +} + +// Decrement decreases the ref count for the given id and returns the current count +func (c *RefCounter) Decrement(id string) int { + c.mu.Lock() + c.counts[id]-- + count := c.counts[id] + c.mu.Unlock() + return count +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/overlay/copy.go b/vendor/github.com/docker/docker/daemon/graphdriver/overlay/copy.go new file mode 100644 index 00000000..7d81a83a --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/overlay/copy.go @@ -0,0 +1,169 @@ +// +build linux + +package overlay + +import ( + "fmt" + "os" + "path/filepath" + "syscall" + "time" + + "github.com/docker/docker/pkg/pools" + "github.com/docker/docker/pkg/system" +) + +type copyFlags int + +const ( + copyHardlink copyFlags = 1 << iota +) + +func copyRegular(srcPath, dstPath string, mode os.FileMode) error { + srcFile, err := os.Open(srcPath) + if err != nil { + return err + } + defer srcFile.Close() + + dstFile, err := os.OpenFile(dstPath, os.O_WRONLY|os.O_CREATE, mode) + if err != nil { + return err + } + defer dstFile.Close() + + _, err = pools.Copy(dstFile, srcFile) + + return err +} + +func copyXattr(srcPath, dstPath, attr string) error { + data, err := system.Lgetxattr(srcPath, attr) + if err != nil { + return err + } + if data != nil { + if err := system.Lsetxattr(dstPath, attr, data, 0); err != nil { + return err + } + } + return nil +} + +func copyDir(srcDir, dstDir string, flags copyFlags) error { + err := filepath.Walk(srcDir, func(srcPath string, f os.FileInfo, err 
error) error { + if err != nil { + return err + } + + // Rebase path + relPath, err := filepath.Rel(srcDir, srcPath) + if err != nil { + return err + } + + dstPath := filepath.Join(dstDir, relPath) + if err != nil { + return err + } + + stat, ok := f.Sys().(*syscall.Stat_t) + if !ok { + return fmt.Errorf("Unable to get raw syscall.Stat_t data for %s", srcPath) + } + + isHardlink := false + + switch f.Mode() & os.ModeType { + case 0: // Regular file + if flags&copyHardlink != 0 { + isHardlink = true + if err := os.Link(srcPath, dstPath); err != nil { + return err + } + } else { + if err := copyRegular(srcPath, dstPath, f.Mode()); err != nil { + return err + } + } + + case os.ModeDir: + if err := os.Mkdir(dstPath, f.Mode()); err != nil && !os.IsExist(err) { + return err + } + + case os.ModeSymlink: + link, err := os.Readlink(srcPath) + if err != nil { + return err + } + + if err := os.Symlink(link, dstPath); err != nil { + return err + } + + case os.ModeNamedPipe: + fallthrough + case os.ModeSocket: + if err := syscall.Mkfifo(dstPath, stat.Mode); err != nil { + return err + } + + case os.ModeDevice: + if err := syscall.Mknod(dstPath, stat.Mode, int(stat.Rdev)); err != nil { + return err + } + + default: + return fmt.Errorf("Unknown file type for %s\n", srcPath) + } + + // Everything below is copying metadata from src to dst. All this metadata + // already shares an inode for hardlinks. + if isHardlink { + return nil + } + + if err := os.Lchown(dstPath, int(stat.Uid), int(stat.Gid)); err != nil { + return err + } + + if err := copyXattr(srcPath, dstPath, "security.capability"); err != nil { + return err + } + + // We need to copy this attribute if it appears in an overlay upper layer, as + // this function is used to copy those. It is set by overlay if a directory + // is removed and then re-created and should not inherit anything from the + // same dir in the lower dir. + if err := copyXattr(srcPath, dstPath, "trusted.overlay.opaque"); err != nil { + return err + } + + isSymlink := f.Mode()&os.ModeSymlink != 0 + + // There is no LChmod, so ignore mode for symlink. 
Also, this + // must happen after chown, as that can modify the file mode + if !isSymlink { + if err := os.Chmod(dstPath, f.Mode()); err != nil { + return err + } + } + + // system.Chtimes doesn't support a NOFOLLOW flag atm + if !isSymlink { + aTime := time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec)) + mTime := time.Unix(int64(stat.Mtim.Sec), int64(stat.Mtim.Nsec)) + if err := system.Chtimes(dstPath, aTime, mTime); err != nil { + return err + } + } else { + ts := []syscall.Timespec{stat.Atim, stat.Mtim} + if err := system.LUtimesNano(dstPath, ts); err != nil { + return err + } + } + return nil + }) + return err +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/overlay/overlay.go b/vendor/github.com/docker/docker/daemon/graphdriver/overlay/overlay.go new file mode 100644 index 00000000..47cddbea --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/overlay/overlay.go @@ -0,0 +1,486 @@ +// +build linux + +package overlay + +import ( + "bufio" + "fmt" + "io/ioutil" + "os" + "path" + "sync" + "syscall" + + "github.com/Sirupsen/logrus" + + "github.com/docker/containerd/subreaper/exec" + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/idtools" + + "github.com/opencontainers/runc/libcontainer/label" +) + +// This is a small wrapper over the NaiveDiffWriter that lets us have a custom +// implementation of ApplyDiff() + +var ( + // ErrApplyDiffFallback is returned to indicate that a normal ApplyDiff is applied as a fallback from the naive diff writer. + ErrApplyDiffFallback = fmt.Errorf("Fall back to normal ApplyDiff") +) + +// ApplyDiffProtoDriver wraps the ProtoDriver by extending the interface with ApplyDiff method. +type ApplyDiffProtoDriver interface { + graphdriver.ProtoDriver + // ApplyDiff writes the diff to the archive for the given id and parent id. + // It returns the size in bytes written if successful; otherwise ErrApplyDiffFallback is returned. + ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error) +} + +type naiveDiffDriverWithApply struct { + graphdriver.Driver + applyDiff ApplyDiffProtoDriver +} + +// NaiveDiffDriverWithApply returns a NaiveDiff driver with custom ApplyDiff. +func NaiveDiffDriverWithApply(driver ApplyDiffProtoDriver, uidMaps, gidMaps []idtools.IDMap) graphdriver.Driver { + return &naiveDiffDriverWithApply{ + Driver: graphdriver.NewNaiveDiffDriver(driver, uidMaps, gidMaps), + applyDiff: driver, + } +} + +// ApplyDiff creates a diff layer with either the NaiveDiffDriver or with a fallback. +func (d *naiveDiffDriverWithApply) ApplyDiff(id, parent string, diff archive.Reader) (int64, error) { + b, err := d.applyDiff.ApplyDiff(id, parent, diff) + if err == ErrApplyDiffFallback { + return d.Driver.ApplyDiff(id, parent, diff) + } + return b, err +} + +// This backend uses the overlay union filesystem for containers +// plus hard link file sharing for images. + +// Each container/image can have a "root" subdirectory which is a plain +// filesystem hierarchy, or they can use overlay. + +// If they use overlay there is an "upper" directory and a "lower-id" +// file, as well as "merged" and "work" directories. The "upper" +// directory has the upper layer of the overlay, and "lower-id" contains +// the id of the parent whose "root" directory shall be used as the lower +// layer in the overlay.
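+// For illustration (hypothetical ids; <home> is the driver home dir), an +// image layer and a container layer based on it would look roughly like: +// +// <home>/<image-id>/root/ - complete filesystem tree +// <home>/<container-id>/lower-id - file containing <image-id> +// <home>/<container-id>/upper/ - writable upper layer +// <home>/<container-id>/work/ - overlayfs work directory +// <home>/<container-id>/merged/ - mount point for the union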
The overlay itself is mounted in the "merged" +// directory, and the "work" dir is needed for overlay to work. + +// When an overlay layer is created there are two cases: either the +// parent has a "root" dir, in which case we start out with an empty "upper" +// directory overlaid on the parent's root. This is typically the +// case with the init layer of a container which is based on an image. +// If there is no "root" in the parent, we inherit the lower-id from +// the parent and start by making a copy in the parent's "upper" dir. +// This is typically the case for a container layer which copies +// its parent -init upper layer. + +// Additionally we also have a custom implementation of ApplyLayer +// which makes a recursive copy of the parent "root" layer using +// hardlinks to share file data, and then applies the layer on top +// of that. This means all child images share file (but not directory) +// data with the parent. + +// Driver contains information about the home directory and the list of active mounts that are created using this driver. +type Driver struct { + home string + pathCacheLock sync.Mutex + pathCache map[string]string + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap + ctr *graphdriver.RefCounter +} + +var backingFs = "" + +func init() { + graphdriver.Register("overlay", Init) +} + +// Init returns the NaiveDiffDriver, a native diff driver for the overlay filesystem. +// If the overlay filesystem is not supported on the host, graphdriver.ErrNotSupported is returned. +// If an overlay filesystem is not supported over an existing filesystem then graphdriver.ErrIncompatibleFS is returned. +func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { + + if err := supportsOverlay(); err != nil { + return nil, graphdriver.ErrNotSupported + } + + fsMagic, err := graphdriver.GetFSMagic(home) + if err != nil { + return nil, err + } + if fsName, ok := graphdriver.FsNames[fsMagic]; ok { + backingFs = fsName + } + + // check if they are running over btrfs or aufs + switch fsMagic { + case graphdriver.FsMagicBtrfs: + logrus.Error("'overlay' is not supported over btrfs.") + return nil, graphdriver.ErrIncompatibleFS + case graphdriver.FsMagicAufs: + logrus.Error("'overlay' is not supported over aufs.") + return nil, graphdriver.ErrIncompatibleFS + case graphdriver.FsMagicZfs: + logrus.Error("'overlay' is not supported over zfs.") + return nil, graphdriver.ErrIncompatibleFS + } + + rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) + if err != nil { + return nil, err + } + // Create the driver home dir + if err := idtools.MkdirAllAs(home, 0700, rootUID, rootGID); err != nil && !os.IsExist(err) { + return nil, err + } + + d := &Driver{ + home: home, + pathCache: make(map[string]string), + uidMaps: uidMaps, + gidMaps: gidMaps, + ctr: graphdriver.NewRefCounter(), + } + + return NaiveDiffDriverWithApply(d, uidMaps, gidMaps), nil +} + +func supportsOverlay() error { + // We can try to modprobe overlay first before looking at + // proc/filesystems for when overlay is supported + exec.Command("modprobe", "overlay").Run() + + f, err := os.Open("/proc/filesystems") + if err != nil { + return err + } + defer f.Close() + + s := bufio.NewScanner(f) + for s.Scan() { + if s.Text() == "nodev\toverlay" { + return nil + } + } + logrus.Error("'overlay' not found as a supported filesystem on this host.
Please ensure the kernel is new enough and has overlay support loaded.") + return graphdriver.ErrNotSupported +} + +func (d *Driver) String() string { + return "overlay" +} + +// Status returns current driver information in a two-dimensional string array. +// Output contains "Backing Filesystem" used in this implementation. +func (d *Driver) Status() [][2]string { + return [][2]string{ + {"Backing Filesystem", backingFs}, + } +} + +// GetMetadata returns metadata about the overlay driver such as root, LowerDir, UpperDir, WorkDir and MergedDir used to store data. +func (d *Driver) GetMetadata(id string) (map[string]string, error) { + dir := d.dir(id) + if _, err := os.Stat(dir); err != nil { + return nil, err + } + + metadata := make(map[string]string) + + // If id has a root, it is an image + rootDir := path.Join(dir, "root") + if _, err := os.Stat(rootDir); err == nil { + metadata["RootDir"] = rootDir + return metadata, nil + } + + lowerID, err := ioutil.ReadFile(path.Join(dir, "lower-id")) + if err != nil { + return nil, err + } + + metadata["LowerDir"] = path.Join(d.dir(string(lowerID)), "root") + metadata["UpperDir"] = path.Join(dir, "upper") + metadata["WorkDir"] = path.Join(dir, "work") + metadata["MergedDir"] = path.Join(dir, "merged") + + return metadata, nil +} + +// Cleanup simply returns nil and does not change the existing filesystem. +// It is required to satisfy the graphdriver.Driver interface. +func (d *Driver) Cleanup() error { + return nil +} + +// Create is used to create the upper, lower, and merged directories required for overlay fs for a given id. +// The parent filesystem is used to configure these directories for the overlay. +func (d *Driver) Create(id, parent, mountLabel string) (retErr error) { + dir := d.dir(id) + + rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) + if err != nil { + return err + } + if err := idtools.MkdirAllAs(path.Dir(dir), 0700, rootUID, rootGID); err != nil { + return err + } + if err := idtools.MkdirAs(dir, 0700, rootUID, rootGID); err != nil { + return err + } + + defer func() { + // Clean up on failure + if retErr != nil { + os.RemoveAll(dir) + } + }() + + // Top-level images are just a "root" dir + if parent == "" { + if err := idtools.MkdirAs(path.Join(dir, "root"), 0755, rootUID, rootGID); err != nil { + return err + } + return nil + } + + parentDir := d.dir(parent) + + // Ensure parent exists + if _, err := os.Lstat(parentDir); err != nil { + return err + } + + // If the parent has a root, just do an overlay on top of it + parentRoot := path.Join(parentDir, "root") + + if s, err := os.Lstat(parentRoot); err == nil { + if err := idtools.MkdirAs(path.Join(dir, "upper"), s.Mode(), rootUID, rootGID); err != nil { + return err + } + if err := idtools.MkdirAs(path.Join(dir, "work"), 0700, rootUID, rootGID); err != nil { + return err + } + if err := idtools.MkdirAs(path.Join(dir, "merged"), 0700, rootUID, rootGID); err != nil { + return err + } + if err := ioutil.WriteFile(path.Join(dir, "lower-id"), []byte(parent), 0666); err != nil { + return err + } + return nil + } + + // Otherwise, copy the upper and the lower-id from the parent + + lowerID, err := ioutil.ReadFile(path.Join(parentDir, "lower-id")) + if err != nil { + return err + } + + if err := ioutil.WriteFile(path.Join(dir, "lower-id"), lowerID, 0666); err != nil { + return err + } + + parentUpperDir := path.Join(parentDir, "upper") + s, err := os.Lstat(parentUpperDir) + if err != nil { + return err + } + + upperDir := path.Join(dir, "upper") + if err := idtools.MkdirAs(upperDir,
s.Mode(), rootUID, rootGID); err != nil { + return err + } + if err := idtools.MkdirAs(path.Join(dir, "work"), 0700, rootUID, rootGID); err != nil { + return err + } + if err := idtools.MkdirAs(path.Join(dir, "merged"), 0700, rootUID, rootGID); err != nil { + return err + } + + return copyDir(parentUpperDir, upperDir, 0) +} + +func (d *Driver) dir(id string) string { + return path.Join(d.home, id) +} + +// Remove cleans the directories that are created for this id. +func (d *Driver) Remove(id string) error { + if err := os.RemoveAll(d.dir(id)); err != nil && !os.IsNotExist(err) { + return err + } + d.pathCacheLock.Lock() + delete(d.pathCache, id) + d.pathCacheLock.Unlock() + return nil +} + +// Get creates and mounts the required file system for the given id and returns the mount path. +func (d *Driver) Get(id string, mountLabel string) (string, error) { + dir := d.dir(id) + if _, err := os.Stat(dir); err != nil { + return "", err + } + + // If id has a root, just return it + rootDir := path.Join(dir, "root") + if _, err := os.Stat(rootDir); err == nil { + d.pathCacheLock.Lock() + d.pathCache[id] = rootDir + d.pathCacheLock.Unlock() + return rootDir, nil + } + + lowerID, err := ioutil.ReadFile(path.Join(dir, "lower-id")) + if err != nil { + return "", err + } + lowerDir := path.Join(d.dir(string(lowerID)), "root") + upperDir := path.Join(dir, "upper") + workDir := path.Join(dir, "work") + mergedDir := path.Join(dir, "merged") + + if count := d.ctr.Increment(id); count > 1 { + return mergedDir, nil + } + + opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lowerDir, upperDir, workDir) + + // if it's mounted already, just return + mounted, err := d.mounted(mergedDir) + if err != nil { + d.ctr.Decrement(id) + return "", err + } + if mounted { + d.ctr.Decrement(id) + return mergedDir, nil + } + + if err := syscall.Mount("overlay", mergedDir, "overlay", 0, label.FormatMountLabel(opts, mountLabel)); err != nil { + d.ctr.Decrement(id) + return "", fmt.Errorf("error creating overlay mount to %s: %v", mergedDir, err) + } + // chown "workdir/work" to the remapped root UID/GID. Overlay fs inside a + // user namespace requires this to move a directory from lower to upper. + rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) + if err != nil { + d.ctr.Decrement(id) + syscall.Unmount(mergedDir, 0) + return "", err + } + + if err := os.Chown(path.Join(workDir, "work"), rootUID, rootGID); err != nil { + d.ctr.Decrement(id) + syscall.Unmount(mergedDir, 0) + return "", err + } + + d.pathCacheLock.Lock() + d.pathCache[id] = mergedDir + d.pathCacheLock.Unlock() + + return mergedDir, nil +} + +func (d *Driver) mounted(dir string) (bool, error) { + return graphdriver.Mounted(graphdriver.FsMagicOverlay, dir) +} + +// Put unmounts the mount path created for the given id.
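+// Get/Put calls are reference counted, so nested callers are safe: a sketch +// of the intended pairing (illustrative only, matching the RefCounter use +// in Get above): +// +// dir, _ := d.Get(id, "") // count 1: mounts and returns "merged" +// dir2, _ := d.Get(id, "") // count 2: returns the existing mount +// d.Put(id) // count 1: mount stays in place +// d.Put(id) // count 0: "merged" is unmounted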
+func (d *Driver) Put(id string) error { + if count := d.ctr.Decrement(id); count > 0 { + return nil + } + d.pathCacheLock.Lock() + mountpoint, exists := d.pathCache[id] + d.pathCacheLock.Unlock() + + if !exists { + logrus.Debugf("Put on a non-mounted device %s", id) + // but it might still be here + if d.Exists(id) { + mountpoint = path.Join(d.dir(id), "merged") + } + + d.pathCacheLock.Lock() + d.pathCache[id] = mountpoint + d.pathCacheLock.Unlock() + } + + if mounted, err := d.mounted(mountpoint); mounted || err != nil { + if err = syscall.Unmount(mountpoint, 0); err != nil { + logrus.Debugf("Failed to unmount %s overlay: %v", id, err) + } + return err + } + return nil +} + +// ApplyDiff applies the new layer on top of the root. If the parent does not exist, it will return an ErrApplyDiffFallback error. +func (d *Driver) ApplyDiff(id string, parent string, diff archive.Reader) (size int64, err error) { + dir := d.dir(id) + + if parent == "" { + return 0, ErrApplyDiffFallback + } + + parentRootDir := path.Join(d.dir(parent), "root") + if _, err := os.Stat(parentRootDir); err != nil { + return 0, ErrApplyDiffFallback + } + + // We now know there is a parent, and it has a "root" directory containing + // the full root filesystem. We can just hardlink it and apply the + // layer. This relies on two things: + // 1) ApplyDiff is only run once on a clean (no writes to upper layer) container + // 2) ApplyDiff doesn't do any in-place writes to files (would break hardlinks) + // These are all currently true and are not expected to break + + tmpRootDir, err := ioutil.TempDir(dir, "tmproot") + if err != nil { + return 0, err + } + defer func() { + if err != nil { + os.RemoveAll(tmpRootDir) + } else { + os.RemoveAll(path.Join(dir, "upper")) + os.RemoveAll(path.Join(dir, "work")) + os.RemoveAll(path.Join(dir, "merged")) + os.RemoveAll(path.Join(dir, "lower-id")) + } + }() + + if err = copyDir(parentRootDir, tmpRootDir, copyHardlink); err != nil { + return 0, err + } + + options := &archive.TarOptions{UIDMaps: d.uidMaps, GIDMaps: d.gidMaps} + if size, err = chrootarchive.ApplyUncompressedLayer(tmpRootDir, diff, options); err != nil { + return 0, err + } + + rootDir := path.Join(dir, "root") + if err := os.Rename(tmpRootDir, rootDir); err != nil { + return 0, err + } + + return +} + +// Exists checks to see if the directory for the given id exists.
+func (d *Driver) Exists(id string) bool { + _, err := os.Stat(d.dir(id)) + return err == nil +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/overlay/overlay_unsupported.go b/vendor/github.com/docker/docker/daemon/graphdriver/overlay/overlay_unsupported.go new file mode 100644 index 00000000..3dbb4de4 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/overlay/overlay_unsupported.go @@ -0,0 +1,3 @@ +// +build !linux + +package overlay diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/register/register_overlay.go b/vendor/github.com/docker/docker/daemon/graphdriver/register/register_overlay.go new file mode 100644 index 00000000..3a952642 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/register/register_overlay.go @@ -0,0 +1,8 @@ +// +build !exclude_graphdriver_overlay,linux + +package register + +import ( + // register the overlay graphdriver + _ "github.com/docker/docker/daemon/graphdriver/overlay" +) diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/register/register_vfs.go b/vendor/github.com/docker/docker/daemon/graphdriver/register/register_vfs.go new file mode 100644 index 00000000..98fad23b --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/register/register_vfs.go @@ -0,0 +1,6 @@ +package register + +import ( + // register vfs + _ "github.com/docker/docker/daemon/graphdriver/vfs" +) diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/vfs/driver.go b/vendor/github.com/docker/docker/daemon/graphdriver/vfs/driver.go new file mode 100644 index 00000000..00d9f8ec --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/vfs/driver.go @@ -0,0 +1,135 @@ +package vfs + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/idtools" + + "github.com/opencontainers/runc/libcontainer/label" +) + +var ( + // CopyWithTar defines the copy method to use. + CopyWithTar = chrootarchive.CopyWithTar +) + +func init() { + graphdriver.Register("vfs", Init) +} + +// Init returns a new VFS driver. +// This sets the home directory for the driver and returns NaiveDiffDriver. +func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { + d := &Driver{ + home: home, + uidMaps: uidMaps, + gidMaps: gidMaps, + } + rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) + if err != nil { + return nil, err + } + if err := idtools.MkdirAllAs(home, 0700, rootUID, rootGID); err != nil { + return nil, err + } + return graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps), nil +} + +// Driver holds information about the driver, home directory of the driver. +// Driver implements graphdriver.ProtoDriver. It uses only basic vfs operations. +// In order to support layering, files are copied from the parent layer into the new layer. There is no copy-on-write support. +// Driver must be wrapped in NaiveDiffDriver to be used as a graphdriver.Driver +type Driver struct { + home string + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap +} + +func (d *Driver) String() string { + return "vfs" +} + +// Status is used for implementing the graphdriver.ProtoDriver interface. VFS does not currently have any status information. +func (d *Driver) Status() [][2]string { + return nil +} + +// GetMetadata is used for implementing the graphdriver.ProtoDriver interface. VFS does not currently have any meta data. 
+func (d *Driver) GetMetadata(id string) (map[string]string, error) { + return nil, nil +} + +// Cleanup is used to implement graphdriver.ProtoDriver. There is no cleanup required for this driver. +func (d *Driver) Cleanup() error { + return nil +} + +// Create prepares the filesystem for the VFS driver and copies the directory for the given id under the parent. +func (d *Driver) Create(id, parent, mountLabel string) error { + dir := d.dir(id) + rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) + if err != nil { + return err + } + if err := idtools.MkdirAllAs(filepath.Dir(dir), 0700, rootUID, rootGID); err != nil { + return err + } + if err := idtools.MkdirAs(dir, 0755, rootUID, rootGID); err != nil { + return err + } + opts := []string{"level:s0"} + if _, mountLabel, err := label.InitLabels(opts); err == nil { + label.SetFileLabel(dir, mountLabel) + } + if parent == "" { + return nil + } + parentDir, err := d.Get(parent, "") + if err != nil { + return fmt.Errorf("%s: %s", parent, err) + } + if err := CopyWithTar(parentDir, dir); err != nil { + return err + } + return nil +} + +func (d *Driver) dir(id string) string { + return filepath.Join(d.home, "dir", filepath.Base(id)) +} + +// Remove deletes the content from the directory for a given id. +func (d *Driver) Remove(id string) error { + if err := os.RemoveAll(d.dir(id)); err != nil && !os.IsNotExist(err) { + return err + } + return nil +} + +// Get returns the directory for the given id. +func (d *Driver) Get(id, mountLabel string) (string, error) { + dir := d.dir(id) + if st, err := os.Stat(dir); err != nil { + return "", err + } else if !st.IsDir() { + return "", fmt.Errorf("%s: not a directory", dir) + } + return dir, nil +} + +// Put is a noop for vfs that returns nil for the error, since this driver has no runtime resources to clean up. +func (d *Driver) Put(id string) error { + // The vfs driver has no runtime resources (e.g. mounts) + // to clean up, so we don't need anything here + return nil +} + +// Exists checks to see if the directory exists for the given id. +func (d *Driver) Exists(id string) bool { + _, err := os.Stat(d.dir(id)) + return err == nil +} diff --git a/vendor/github.com/docker/docker/daemon/image_delete.go b/vendor/github.com/docker/docker/daemon/image_delete.go new file mode 100644 index 00000000..7c6329a6 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/image_delete.go @@ -0,0 +1,371 @@ +package daemon + +import ( + "fmt" + "strings" + + "github.com/docker/docker/container" + "github.com/docker/docker/errors" + "github.com/docker/docker/image" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/reference" + "github.com/docker/engine-api/types" +) + +type conflictType int + +const ( + conflictDependentChild conflictType = (1 << iota) + conflictRunningContainer + conflictActiveReference + conflictStoppedContainer + conflictHard = conflictDependentChild | conflictRunningContainer + conflictSoft = conflictActiveReference | conflictStoppedContainer +)
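+ +// A sketch of how these masks are meant to be combined (illustrative only, +// mirroring the call sites in ImageDelete and imageDeleteHelper below): +// +// c := conflictHard +// if !force { +// c |= conflictSoft +// } +// conflict := daemon.checkImageDeleteConflict(imgID, c)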
+ +// ImageDelete deletes the image referenced by the given imageRef from this +// daemon. The given imageRef can be an image ID, ID prefix, or a repository +// reference (with an optional tag or digest, defaulting to the tag name +// "latest"). There is differing behavior depending on whether the given +// imageRef is a repository reference or not. +// +// If the given imageRef is a repository reference then that repository +// reference will be removed. However, if there exist any containers that +// were created using the same image reference then the repository reference +// cannot be removed unless either there are other repository references to the +// same image or force is true. Following removal of the repository reference, +// an attempt is then made to delete the referenced image itself as described +// below, but quietly, meaning any image delete conflicts will cause the image to not +// be deleted and the conflict will not be reported. +// +// There may be conflicts preventing deletion of an image and these conflicts +// are divided into two categories grouped by their severity: +// +// Hard Conflict: +// - a pull or build using the image. +// - any descendant image. +// - any running container using the image. +// +// Soft Conflict: +// - any stopped container using the image. +// - any repository tag or digest references to the image. +// +// The image cannot be removed if there are any hard conflicts; if there are +// only soft conflicts, it can be removed only if force is true. +// +// If prune is true, ancestor images will each attempt to be deleted quietly, +// meaning any delete conflicts will cause the image to not be deleted and the +// conflict will not be reported. +// +// FIXME: remove ImageDelete's dependency on Daemon, then move to the graph +// package. This would require that we no longer need the daemon to determine +// whether images are being used by a stopped or running container. +func (daemon *Daemon) ImageDelete(imageRef string, force, prune bool) ([]types.ImageDelete, error) { + records := []types.ImageDelete{} + + imgID, err := daemon.GetImageID(imageRef) + if err != nil { + return nil, daemon.imageNotExistToErrcode(err) + } + + repoRefs := daemon.referenceStore.References(imgID) + + var removedRepositoryRef bool + if !isImageIDPrefix(imgID.String(), imageRef) { + // A repository reference was given and should be removed + // first. We can only remove this reference if either force is + // true, there are multiple repository references to this + // image, or there are no containers using the given reference. + if !(force || len(repoRefs) > 1) { + if container := daemon.getContainerUsingImage(imgID); container != nil { + // If we removed the repository reference then + // this image would remain "dangling" and since + // we really want to avoid that the client must + // explicitly force its removal. + err := fmt.Errorf("conflict: unable to remove repository reference %q (must force) - container %s is using its referenced image %s", imageRef, stringid.TruncateID(container.ID), stringid.TruncateID(imgID.String())) + return nil, errors.NewRequestConflictError(err) + } + } + + parsedRef, err := reference.ParseNamed(imageRef) + if err != nil { + return nil, err + } + + parsedRef, err = daemon.removeImageRef(parsedRef) + if err != nil { + return nil, err + } + + untaggedRecord := types.ImageDelete{Untagged: parsedRef.String()} + + daemon.LogImageEvent(imgID.String(), imgID.String(), "untag") + records = append(records, untaggedRecord) + + repoRefs = daemon.referenceStore.References(imgID) + + // If this is a tag reference and all the remaining references + // to this image are digest references, delete the remaining + // references so that they don't prevent removal of the image.
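+ // For example (illustrative): untagging "repo:latest" may leave only a + // "repo@sha256:..." digest reference behind; that digest reference is + // removed here as well so that the image itself can then be deleted.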
+ if _, isCanonical := parsedRef.(reference.Canonical); !isCanonical { + foundTagRef := false + for _, repoRef := range repoRefs { + if _, repoRefIsCanonical := repoRef.(reference.Canonical); !repoRefIsCanonical { + foundTagRef = true + break + } + } + if !foundTagRef { + for _, repoRef := range repoRefs { + if _, err := daemon.removeImageRef(repoRef); err != nil { + return records, err + } + + untaggedRecord := types.ImageDelete{Untagged: repoRef.String()} + records = append(records, untaggedRecord) + } + repoRefs = []reference.Named{} + } + } + + // If it has remaining references then the untag finished the remove + if len(repoRefs) > 0 { + return records, nil + } + + removedRepositoryRef = true + } else { + // If an ID reference was given AND there is exactly one + // repository reference to the image then we will want to + // remove that reference. + // FIXME: Is this the behavior we want? + if len(repoRefs) == 1 { + c := conflictHard + if !force { + c |= conflictSoft &^ conflictActiveReference + } + if conflict := daemon.checkImageDeleteConflict(imgID, c); conflict != nil { + return nil, conflict + } + + parsedRef, err := daemon.removeImageRef(repoRefs[0]) + if err != nil { + return nil, err + } + + untaggedRecord := types.ImageDelete{Untagged: parsedRef.String()} + + daemon.LogImageEvent(imgID.String(), imgID.String(), "untag") + records = append(records, untaggedRecord) + } + } + + return records, daemon.imageDeleteHelper(imgID, &records, force, prune, removedRepositoryRef) +} + +// isImageIDPrefix returns whether the given possiblePrefix is a prefix of the +// given imageID. +func isImageIDPrefix(imageID, possiblePrefix string) bool { + if strings.HasPrefix(imageID, possiblePrefix) { + return true + } + + if i := strings.IndexRune(imageID, ':'); i >= 0 { + return strings.HasPrefix(imageID[i+1:], possiblePrefix) + } + + return false +} + +// getContainerUsingImage returns a container that was created using the given +// imageID. Returns nil if there is no such container. +func (daemon *Daemon) getContainerUsingImage(imageID image.ID) *container.Container { + return daemon.containers.First(func(c *container.Container) bool { + return c.ImageID == imageID + }) +} + +// removeImageRef attempts to parse and remove the given image reference from +// this daemon's store of repository tag/digest references. The given +// repositoryRef must not be an image ID but a repository name followed by an +// optional tag or digest reference. If tag or digest is omitted, the default +// tag is used. Returns the resolved image reference and an error. +func (daemon *Daemon) removeImageRef(ref reference.Named) (reference.Named, error) { + ref = reference.WithDefaultTag(ref) + // Ignore the boolean value returned, as far as we're concerned, this + // is an idempotent operation and it's okay if the reference didn't + // exist in the first place. + _, err := daemon.referenceStore.Delete(ref) + + return ref, err +} + +// removeAllReferencesToImageID attempts to remove every reference to the given +// imgID from this daemon's store of repository tag/digest references. Returns +// on the first encountered error. Removed references are logged to this +// daemon's event service. An "Untagged" types.ImageDelete is added to the +// given list of records. 
+func (daemon *Daemon) removeAllReferencesToImageID(imgID image.ID, records *[]types.ImageDelete) error { + imageRefs := daemon.referenceStore.References(imgID) + + for _, imageRef := range imageRefs { + parsedRef, err := daemon.removeImageRef(imageRef) + if err != nil { + return err + } + + untaggedRecord := types.ImageDelete{Untagged: parsedRef.String()} + + daemon.LogImageEvent(imgID.String(), imgID.String(), "untag") + *records = append(*records, untaggedRecord) + } + + return nil +} + +// ImageDeleteConflict holds a soft or hard conflict and an associated error. +// Implements the error interface. +type imageDeleteConflict struct { + hard bool + used bool + imgID image.ID + message string +} + +func (idc *imageDeleteConflict) Error() string { + var forceMsg string + if idc.hard { + forceMsg = "cannot be forced" + } else { + forceMsg = "must be forced" + } + + return fmt.Sprintf("conflict: unable to delete %s (%s) - %s", stringid.TruncateID(idc.imgID.String()), forceMsg, idc.message) +} + +// imageDeleteHelper attempts to delete the given image from this daemon. If +// the image has any hard delete conflicts (child images or running containers +// using the image) then it cannot be deleted. If the image has any soft delete +// conflicts (any tags/digests referencing the image or any stopped container +// using the image) then it can only be deleted if force is true. If the delete +// succeeds and prune is true, the parent images are also deleted if they do +// not have any soft or hard delete conflicts themselves. Any deleted images +// and untagged references are appended to the given records. If any error or +// conflict is encountered, it will be returned immediately without deleting +// the image. If quiet is true, any encountered conflicts will be ignored and +// the function will return nil immediately without deleting the image. +func (daemon *Daemon) imageDeleteHelper(imgID image.ID, records *[]types.ImageDelete, force, prune, quiet bool) error { + // First, determine if this image has any conflicts. Ignore soft conflicts + // if force is true. + c := conflictHard + if !force { + c |= conflictSoft + } + if conflict := daemon.checkImageDeleteConflict(imgID, c); conflict != nil { + if quiet && (!daemon.imageIsDangling(imgID) || conflict.used) { + // Ignore conflicts UNLESS the image is "dangling" or not being used in + // which case we want the user to know. + return nil + } + + // There was a conflict and it's either a hard conflict OR we are not + // forcing deletion on soft conflicts. + return conflict + } + + parent, err := daemon.imageStore.GetParent(imgID) + if err != nil { + // There may be no parent + parent = "" + } + + // Delete all repository tag/digest references to this image. + if err := daemon.removeAllReferencesToImageID(imgID, records); err != nil { + return err + } + + removedLayers, err := daemon.imageStore.Delete(imgID) + if err != nil { + return err + } + + daemon.LogImageEvent(imgID.String(), imgID.String(), "delete") + *records = append(*records, types.ImageDelete{Deleted: imgID.String()}) + for _, removedLayer := range removedLayers { + *records = append(*records, types.ImageDelete{Deleted: removedLayer.ChainID.String()}) + } + + if !prune || parent == "" { + return nil + } + + // We need to prune the parent image. This means delete it if there are + // no tags/digests referencing it and there are no containers using it ( + // either running or stopped). + // Do not force prunings, but do so quietly (stopping on any encountered + // conflicts). 
+ return daemon.imageDeleteHelper(parent, records, false, true, true) +} + +// checkImageDeleteConflict determines whether there are any conflicts +// preventing deletion of the given image from this daemon. A hard conflict is +// any image which has the given image as a parent or any running container +// using the image. A soft conflict is any tags/digest referencing the given +// image or any stopped container using the image. If ignoreSoftConflicts is +// true, this function will not check for soft conflict conditions. +func (daemon *Daemon) checkImageDeleteConflict(imgID image.ID, mask conflictType) *imageDeleteConflict { + // Check if the image has any descendant images. + if mask&conflictDependentChild != 0 && len(daemon.imageStore.Children(imgID)) > 0 { + return &imageDeleteConflict{ + hard: true, + imgID: imgID, + message: "image has dependent child images", + } + } + + if mask&conflictRunningContainer != 0 { + // Check if any running container is using the image. + running := func(c *container.Container) bool { + return c.IsRunning() && c.ImageID == imgID + } + if container := daemon.containers.First(running); container != nil { + return &imageDeleteConflict{ + imgID: imgID, + hard: true, + used: true, + message: fmt.Sprintf("image is being used by running container %s", stringid.TruncateID(container.ID)), + } + } + } + + // Check if any repository tags/digest reference this image. + if mask&conflictActiveReference != 0 && len(daemon.referenceStore.References(imgID)) > 0 { + return &imageDeleteConflict{ + imgID: imgID, + message: "image is referenced in one or more repositories", + } + } + + if mask&conflictStoppedContainer != 0 { + // Check if any stopped containers reference this image. + stopped := func(c *container.Container) bool { + return !c.IsRunning() && c.ImageID == imgID + } + if container := daemon.containers.First(stopped); container != nil { + return &imageDeleteConflict{ + imgID: imgID, + used: true, + message: fmt.Sprintf("image is being used by stopped container %s", stringid.TruncateID(container.ID)), + } + } + } + + return nil +} + +// imageIsDangling returns whether the given image is "dangling" which means +// that there are no repository references to the given image and it has no +// child images. +func (daemon *Daemon) imageIsDangling(imgID image.ID) bool { + return !(len(daemon.referenceStore.References(imgID)) > 0 || len(daemon.imageStore.Children(imgID)) > 0) +} diff --git a/vendor/github.com/docker/docker/daemon/images.go b/vendor/github.com/docker/docker/daemon/images.go new file mode 100644 index 00000000..e4c3797f --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/images.go @@ -0,0 +1,162 @@ +package daemon + +import ( + "fmt" + "path" + "sort" + + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/reference" + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/filters" +) + +var acceptedImageFilterTags = map[string]bool{ + "dangling": true, + "label": true, +} + +// byCreated is a temporary type used to sort a list of images by creation +// time. +type byCreated []*types.Image + +func (r byCreated) Len() int { return len(r) } +func (r byCreated) Swap(i, j int) { r[i], r[j] = r[j], r[i] } +func (r byCreated) Less(i, j int) bool { return r[i].Created < r[j].Created } + +// Map returns a map of all images in the ImageStore +func (daemon *Daemon) Map() map[image.ID]*image.Image { + return daemon.imageStore.Map() +} + +// Images returns a filtered list of images. 
filterArgs is a JSON-encoded set +// of filter arguments which will be interpreted by api/types/filters. +// filter is a shell glob string applied to repository names. The argument +// named all controls whether all images in the graph are filtered, or just +// the heads. +func (daemon *Daemon) Images(filterArgs, filter string, all bool) ([]*types.Image, error) { + var ( + allImages map[image.ID]*image.Image + err error + danglingOnly = false + ) + + imageFilters, err := filters.FromParam(filterArgs) + if err != nil { + return nil, err + } + if err := imageFilters.Validate(acceptedImageFilterTags); err != nil { + return nil, err + } + + if imageFilters.Include("dangling") { + if imageFilters.ExactMatch("dangling", "true") { + danglingOnly = true + } else if !imageFilters.ExactMatch("dangling", "false") { + return nil, fmt.Errorf("Invalid filter 'dangling=%s'", imageFilters.Get("dangling")) + } + } + if danglingOnly { + allImages = daemon.imageStore.Heads() + } else { + allImages = daemon.imageStore.Map() + } + + images := []*types.Image{} + + var filterTagged bool + if filter != "" { + filterRef, err := reference.ParseNamed(filter) + if err == nil { // parse error means wildcard repo + if _, ok := filterRef.(reference.NamedTagged); ok { + filterTagged = true + } + } + } + + for id, img := range allImages { + if imageFilters.Include("label") { + // Very old images do not have image.Config (or even labels) + if img.Config == nil { + continue + } + // We are now sure image.Config is not nil + if !imageFilters.MatchKVList("label", img.Config.Labels) { + continue + } + } + + layerID := img.RootFS.ChainID() + var size int64 + if layerID != "" { + l, err := daemon.layerStore.Get(layerID) + if err != nil { + return nil, err + } + + size, err = l.Size() + layer.ReleaseAndLog(daemon.layerStore, l) + if err != nil { + return nil, err + } + } + + newImage := newImage(img, size) + + for _, ref := range daemon.referenceStore.References(id) { + if filter != "" { // filter by tag/repo name + if filterTagged { // filter by tag, require full ref match + if ref.String() != filter { + continue + } + } else if matched, err := path.Match(filter, ref.Name()); !matched || err != nil { // name only match, FIXME: docs say exact + continue + } + } + if _, ok := ref.(reference.Canonical); ok { + newImage.RepoDigests = append(newImage.RepoDigests, ref.String()) + } + if _, ok := ref.(reference.NamedTagged); ok { + newImage.RepoTags = append(newImage.RepoTags, ref.String()) + } + } + if newImage.RepoDigests == nil && newImage.RepoTags == nil { + if all || len(daemon.imageStore.Children(id)) == 0 { + + if imageFilters.Include("dangling") && !danglingOnly { + // dangling=false case, so dangling images are not wanted + continue + } + if filter != "" { // skip images with no references if filtering by tag + continue + } + newImage.RepoDigests = []string{"@"} + newImage.RepoTags = []string{":"} + } else { + continue + } + } else if danglingOnly { + continue + } + + images = append(images, newImage) + } + + sort.Sort(sort.Reverse(byCreated(images))) + + return images, nil +} + +func newImage(image *image.Image, size int64) *types.Image { + newImage := new(types.Image) + newImage.ParentID = image.Parent.String() + newImage.ID = image.ID().String() + newImage.Created = image.Created.Unix() + newImage.Size = size + newImage.VirtualSize = size + if image.Config != nil { + newImage.Labels = image.Config.Labels + } + return newImage +} diff --git a/vendor/github.com/docker/docker/daemon/import.go
b/vendor/github.com/docker/docker/daemon/import.go new file mode 100644 index 00000000..4961a30f --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/import.go @@ -0,0 +1,109 @@ +package daemon + +import ( + "encoding/json" + "io" + "net/http" + "net/url" + "runtime" + "time" + + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/httputils" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/streamformatter" + "github.com/docker/docker/reference" + "github.com/docker/engine-api/types/container" +) + +// ImportImage imports an image, getting the archived layer data either from +// inConfig (if src is "-"), or from a URI specified in src. Progress output is +// written to outStream. Repository and tag names can optionally be given in +// the repo and tag arguments, respectively. +func (daemon *Daemon) ImportImage(src string, newRef reference.Named, msg string, inConfig io.ReadCloser, outStream io.Writer, config *container.Config) error { + var ( + sf = streamformatter.NewJSONStreamFormatter() + rc io.ReadCloser + resp *http.Response + ) + + if src == "-" { + rc = inConfig + } else { + inConfig.Close() + u, err := url.Parse(src) + if err != nil { + return err + } + if u.Scheme == "" { + u.Scheme = "http" + u.Host = src + u.Path = "" + } + outStream.Write(sf.FormatStatus("", "Downloading from %s", u)) + resp, err = httputils.Download(u.String()) + if err != nil { + return err + } + progressOutput := sf.NewProgressOutput(outStream, true) + rc = progress.NewProgressReader(resp.Body, progressOutput, resp.ContentLength, "", "Importing") + } + + defer rc.Close() + if len(msg) == 0 { + msg = "Imported from " + src + } + + inflatedLayerData, err := archive.DecompressStream(rc) + if err != nil { + return err + } + // TODO: support windows baselayer? 
+ l, err := daemon.layerStore.Register(inflatedLayerData, "") + if err != nil { + return err + } + defer layer.ReleaseAndLog(daemon.layerStore, l) + + created := time.Now().UTC() + imgConfig, err := json.Marshal(&image.Image{ + V1Image: image.V1Image{ + DockerVersion: dockerversion.Version, + Config: config, + Architecture: runtime.GOARCH, + OS: runtime.GOOS, + Created: created, + Comment: msg, + }, + RootFS: &image.RootFS{ + Type: "layers", + DiffIDs: []layer.DiffID{l.DiffID()}, + }, + History: []image.History{{ + Created: created, + Comment: msg, + }}, + }) + if err != nil { + return err + } + + id, err := daemon.imageStore.Create(imgConfig) + if err != nil { + return err + } + + // FIXME: connect with commit code and call refstore directly + if newRef != nil { + if err := daemon.TagImage(newRef, id.String()); err != nil { + return err + } + } + + daemon.LogImageEvent(id.String(), id.String(), "import") + outStream.Write(sf.FormatStatus("", id.String())) + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/info.go b/vendor/github.com/docker/docker/daemon/info.go new file mode 100644 index 00000000..c94cf9ba --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/info.go @@ -0,0 +1,162 @@ +package daemon + +import ( + "os" + "runtime" + "sync/atomic" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/container" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/pkg/fileutils" + "github.com/docker/docker/pkg/parsers/kernel" + "github.com/docker/docker/pkg/parsers/operatingsystem" + "github.com/docker/docker/pkg/platform" + "github.com/docker/docker/pkg/sysinfo" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/registry" + "github.com/docker/docker/utils" + "github.com/docker/docker/volume/drivers" + "github.com/docker/engine-api/types" + "github.com/docker/go-connections/sockets" +) + +// SystemInfo returns information about the host server the daemon is running on. 
+func (daemon *Daemon) SystemInfo() (*types.Info, error) { + kernelVersion := "" + if kv, err := kernel.GetKernelVersion(); err != nil { + logrus.Warnf("Could not get kernel version: %v", err) + } else { + kernelVersion = kv.String() + } + + operatingSystem := "" + if s, err := operatingsystem.GetOperatingSystem(); err != nil { + logrus.Warnf("Could not get operating system name: %v", err) + } else { + operatingSystem = s + } + + // Don't do containerized check on Windows + if runtime.GOOS != "windows" { + if inContainer, err := operatingsystem.IsContainerized(); err != nil { + logrus.Errorf("Could not determine if daemon is containerized: %v", err) + operatingSystem += " (error determining if containerized)" + } else if inContainer { + operatingSystem += " (containerized)" + } + } + + meminfo, err := system.ReadMemInfo() + if err != nil { + logrus.Errorf("Could not read system memory info: %v", err) + } + + sysInfo := sysinfo.New(true) + + var cRunning, cPaused, cStopped int32 + daemon.containers.ApplyAll(func(c *container.Container) { + switch c.StateString() { + case "paused": + atomic.AddInt32(&cPaused, 1) + case "running": + atomic.AddInt32(&cRunning, 1) + default: + atomic.AddInt32(&cStopped, 1) + } + }) + + v := &types.Info{ + ID: daemon.ID, + Containers: int(cRunning + cPaused + cStopped), + ContainersRunning: int(cRunning), + ContainersPaused: int(cPaused), + ContainersStopped: int(cStopped), + Images: len(daemon.imageStore.Map()), + Driver: daemon.GraphDriverName(), + DriverStatus: daemon.layerStore.DriverStatus(), + Plugins: daemon.showPluginsInfo(), + IPv4Forwarding: !sysInfo.IPv4ForwardingDisabled, + BridgeNfIptables: !sysInfo.BridgeNFCallIPTablesDisabled, + BridgeNfIP6tables: !sysInfo.BridgeNFCallIP6TablesDisabled, + Debug: utils.IsDebugEnabled(), + NFd: fileutils.GetTotalUsedFds(), + NGoroutines: runtime.NumGoroutine(), + SystemTime: time.Now().Format(time.RFC3339Nano), + LoggingDriver: daemon.defaultLogConfig.Type, + CgroupDriver: daemon.getCgroupDriver(), + NEventsListener: daemon.EventsService.SubscribersCount(), + KernelVersion: kernelVersion, + OperatingSystem: operatingSystem, + IndexServerAddress: registry.IndexServer, + OSType: platform.OSType, + Architecture: platform.Architecture, + RegistryConfig: daemon.RegistryService.ServiceConfig(), + NCPU: runtime.NumCPU(), + MemTotal: meminfo.MemTotal, + DockerRootDir: daemon.configStore.Root, + Labels: daemon.configStore.Labels, + ExperimentalBuild: utils.ExperimentalBuild(), + ServerVersion: dockerversion.Version, + ClusterStore: daemon.configStore.ClusterStore, + ClusterAdvertise: daemon.configStore.ClusterAdvertise, + HTTPProxy: sockets.GetProxyEnv("http_proxy"), + HTTPSProxy: sockets.GetProxyEnv("https_proxy"), + NoProxy: sockets.GetProxyEnv("no_proxy"), + } + + // TODO Windows. Refactor this more once sysinfo is refactored into + // platform specific code. On Windows, sysinfo.cgroupMemInfo and + // sysinfo.cgroupCpuInfo will be nil otherwise and cause a SIGSEGV if + // an attempt is made to access through them. + if runtime.GOOS != "windows" { + v.MemoryLimit = sysInfo.MemoryLimit + v.SwapLimit = sysInfo.SwapLimit + v.KernelMemory = sysInfo.KernelMemory + v.OomKillDisable = sysInfo.OomKillDisable + v.CPUCfsPeriod = sysInfo.CPUCfsPeriod + v.CPUCfsQuota = sysInfo.CPUCfsQuota + v.CPUShares = sysInfo.CPUShares + v.CPUSet = sysInfo.Cpuset + } + + if hostname, err := os.Hostname(); err == nil { + v.Name = hostname + } + + return v, nil +} + +// SystemVersion returns version information about the daemon. 
+func (daemon *Daemon) SystemVersion() types.Version { + v := types.Version{ + Version: dockerversion.Version, + GitCommit: dockerversion.GitCommit, + GoVersion: runtime.Version(), + Os: runtime.GOOS, + Arch: runtime.GOARCH, + BuildTime: dockerversion.BuildTime, + Experimental: utils.ExperimentalBuild(), + } + + kernelVersion := "" + if kv, err := kernel.GetKernelVersion(); err != nil { + logrus.Warnf("Could not get kernel version: %v", err) + } else { + kernelVersion = kv.String() + } + v.KernelVersion = kernelVersion + + return v +} + +func (daemon *Daemon) showPluginsInfo() types.PluginsInfo { + var pluginsInfo types.PluginsInfo + + pluginsInfo.Volume = volumedrivers.GetDriverList() + + pluginsInfo.Authorization = daemon.configStore.AuthorizationPlugins + + return pluginsInfo +} diff --git a/vendor/github.com/docker/docker/daemon/inspect.go b/vendor/github.com/docker/docker/daemon/inspect.go new file mode 100644 index 00000000..2f810e3e --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/inspect.go @@ -0,0 +1,245 @@ +package daemon + +import ( + "fmt" + "time" + + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/network" + "github.com/docker/docker/pkg/version" + "github.com/docker/engine-api/types" + networktypes "github.com/docker/engine-api/types/network" + "github.com/docker/engine-api/types/versions/v1p20" +) + +// ContainerInspect returns low-level information about a +// container. Returns an error if the container cannot be found, or if +// there is an error getting the data. +func (daemon *Daemon) ContainerInspect(name string, size bool, version version.Version) (interface{}, error) { + switch { + case version.LessThan("1.20"): + return daemon.containerInspectPre120(name) + case version.Equal("1.20"): + return daemon.containerInspect120(name) + } + return daemon.containerInspectCurrent(name, size) +} + +func (daemon *Daemon) containerInspectCurrent(name string, size bool) (*types.ContainerJSON, error) { + container, err := daemon.GetContainer(name) + if err != nil { + return nil, err + } + + container.Lock() + defer container.Unlock() + + base, err := daemon.getInspectData(container, size) + if err != nil { + return nil, err + } + + mountPoints := addMountPoints(container) + networkSettings := &types.NetworkSettings{ + NetworkSettingsBase: types.NetworkSettingsBase{ + Bridge: container.NetworkSettings.Bridge, + SandboxID: container.NetworkSettings.SandboxID, + HairpinMode: container.NetworkSettings.HairpinMode, + LinkLocalIPv6Address: container.NetworkSettings.LinkLocalIPv6Address, + LinkLocalIPv6PrefixLen: container.NetworkSettings.LinkLocalIPv6PrefixLen, + Ports: container.NetworkSettings.Ports, + SandboxKey: container.NetworkSettings.SandboxKey, + SecondaryIPAddresses: container.NetworkSettings.SecondaryIPAddresses, + SecondaryIPv6Addresses: container.NetworkSettings.SecondaryIPv6Addresses, + }, + DefaultNetworkSettings: daemon.getDefaultNetworkSettings(container.NetworkSettings.Networks), + Networks: container.NetworkSettings.Networks, + } + + return &types.ContainerJSON{ + ContainerJSONBase: base, + Mounts: mountPoints, + Config: container.Config, + NetworkSettings: networkSettings, + }, nil +} + +// containerInspect120 serializes the master version of a container into a json type. 
+func (daemon *Daemon) containerInspect120(name string) (*v1p20.ContainerJSON, error) { + container, err := daemon.GetContainer(name) + if err != nil { + return nil, err + } + + container.Lock() + defer container.Unlock() + + base, err := daemon.getInspectData(container, false) + if err != nil { + return nil, err + } + + mountPoints := addMountPoints(container) + config := &v1p20.ContainerConfig{ + Config: container.Config, + MacAddress: container.Config.MacAddress, + NetworkDisabled: container.Config.NetworkDisabled, + ExposedPorts: container.Config.ExposedPorts, + VolumeDriver: container.HostConfig.VolumeDriver, + } + networkSettings := daemon.getBackwardsCompatibleNetworkSettings(container.NetworkSettings) + + return &v1p20.ContainerJSON{ + ContainerJSONBase: base, + Mounts: mountPoints, + Config: config, + NetworkSettings: networkSettings, + }, nil +} + +func (daemon *Daemon) getInspectData(container *container.Container, size bool) (*types.ContainerJSONBase, error) { + // make a copy to play with + hostConfig := *container.HostConfig + + children := daemon.children(container) + hostConfig.Links = nil // do not expose the internal structure + for linkAlias, child := range children { + hostConfig.Links = append(hostConfig.Links, fmt.Sprintf("%s:%s", child.Name, linkAlias)) + } + + // we need this trick to preserve empty log driver, so + // container will use daemon defaults even if daemon changes them + if hostConfig.LogConfig.Type == "" { + hostConfig.LogConfig.Type = daemon.defaultLogConfig.Type + } + + if len(hostConfig.LogConfig.Config) == 0 { + hostConfig.LogConfig.Config = daemon.defaultLogConfig.Config + } + + containerState := &types.ContainerState{ + Status: container.State.StateString(), + Running: container.State.Running, + Paused: container.State.Paused, + Restarting: container.State.Restarting, + OOMKilled: container.State.OOMKilled, + Dead: container.State.Dead, + Pid: container.State.Pid, + ExitCode: container.State.ExitCode, + Error: container.State.Error, + StartedAt: container.State.StartedAt.Format(time.RFC3339Nano), + FinishedAt: container.State.FinishedAt.Format(time.RFC3339Nano), + } + + contJSONBase := &types.ContainerJSONBase{ + ID: container.ID, + Created: container.Created.Format(time.RFC3339Nano), + Path: container.Path, + Args: container.Args, + State: containerState, + Image: container.ImageID.String(), + LogPath: container.LogPath, + Name: container.Name, + RestartCount: container.RestartCount, + Driver: container.Driver, + MountLabel: container.MountLabel, + ProcessLabel: container.ProcessLabel, + ExecIDs: container.GetExecIDs(), + HostConfig: &hostConfig, + } + + var ( + sizeRw int64 + sizeRootFs int64 + ) + if size { + sizeRw, sizeRootFs = daemon.getSize(container) + contJSONBase.SizeRw = &sizeRw + contJSONBase.SizeRootFs = &sizeRootFs + } + + // Now set any platform-specific fields + contJSONBase = setPlatformSpecificContainerFields(container, contJSONBase) + + contJSONBase.GraphDriver.Name = container.Driver + + graphDriverData, err := container.RWLayer.Metadata() + if err != nil { + return nil, err + } + contJSONBase.GraphDriver.Data = graphDriverData + + return contJSONBase, nil +} + +// ContainerExecInspect returns low-level information about the exec +// command. An error is returned if the exec cannot be found. 
+func (daemon *Daemon) ContainerExecInspect(id string) (*backend.ExecInspect, error) { + e, err := daemon.getExecConfig(id) + if err != nil { + return nil, err + } + + pc := inspectExecProcessConfig(e) + + return &backend.ExecInspect{ + ID: e.ID, + Running: e.Running, + ExitCode: e.ExitCode, + ProcessConfig: pc, + OpenStdin: e.OpenStdin, + OpenStdout: e.OpenStdout, + OpenStderr: e.OpenStderr, + CanRemove: e.CanRemove, + ContainerID: e.ContainerID, + DetachKeys: e.DetachKeys, + }, nil +} + +// VolumeInspect looks up a volume by name. An error is returned if +// the volume cannot be found. +func (daemon *Daemon) VolumeInspect(name string) (*types.Volume, error) { + v, err := daemon.volumes.Get(name) + if err != nil { + return nil, err + } + return volumeToAPIType(v), nil +} + +func (daemon *Daemon) getBackwardsCompatibleNetworkSettings(settings *network.Settings) *v1p20.NetworkSettings { + result := &v1p20.NetworkSettings{ + NetworkSettingsBase: types.NetworkSettingsBase{ + Bridge: settings.Bridge, + SandboxID: settings.SandboxID, + HairpinMode: settings.HairpinMode, + LinkLocalIPv6Address: settings.LinkLocalIPv6Address, + LinkLocalIPv6PrefixLen: settings.LinkLocalIPv6PrefixLen, + Ports: settings.Ports, + SandboxKey: settings.SandboxKey, + SecondaryIPAddresses: settings.SecondaryIPAddresses, + SecondaryIPv6Addresses: settings.SecondaryIPv6Addresses, + }, + DefaultNetworkSettings: daemon.getDefaultNetworkSettings(settings.Networks), + } + + return result +} + +// getDefaultNetworkSettings creates the deprecated structure that holds the information +// about the bridge network for a container. +func (daemon *Daemon) getDefaultNetworkSettings(networks map[string]*networktypes.EndpointSettings) types.DefaultNetworkSettings { + var settings types.DefaultNetworkSettings + + if defaultNetwork, ok := networks["bridge"]; ok { + settings.EndpointID = defaultNetwork.EndpointID + settings.Gateway = defaultNetwork.Gateway + settings.GlobalIPv6Address = defaultNetwork.GlobalIPv6Address + settings.GlobalIPv6PrefixLen = defaultNetwork.GlobalIPv6PrefixLen + settings.IPAddress = defaultNetwork.IPAddress + settings.IPPrefixLen = defaultNetwork.IPPrefixLen + settings.IPv6Gateway = defaultNetwork.IPv6Gateway + settings.MacAddress = defaultNetwork.MacAddress + } + return settings +} diff --git a/vendor/github.com/docker/docker/daemon/inspect_unix.go b/vendor/github.com/docker/docker/daemon/inspect_unix.go new file mode 100644 index 00000000..6033c02d --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/inspect_unix.go @@ -0,0 +1,91 @@ +// +build !windows + +package daemon + +import ( + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/exec" + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/versions/v1p19" +) + +// This sets platform-specific fields +func setPlatformSpecificContainerFields(container *container.Container, contJSONBase *types.ContainerJSONBase) *types.ContainerJSONBase { + contJSONBase.AppArmorProfile = container.AppArmorProfile + contJSONBase.ResolvConfPath = container.ResolvConfPath + contJSONBase.HostnamePath = container.HostnamePath + contJSONBase.HostsPath = container.HostsPath + + return contJSONBase +} + +// containerInspectPre120 gets containers for pre 1.20 APIs. 
+func (daemon *Daemon) containerInspectPre120(name string) (*v1p19.ContainerJSON, error) { + container, err := daemon.GetContainer(name) + if err != nil { + return nil, err + } + + container.Lock() + defer container.Unlock() + + base, err := daemon.getInspectData(container, false) + if err != nil { + return nil, err + } + + volumes := make(map[string]string) + volumesRW := make(map[string]bool) + for _, m := range container.MountPoints { + volumes[m.Destination] = m.Path() + volumesRW[m.Destination] = m.RW + } + + config := &v1p19.ContainerConfig{ + Config: container.Config, + MacAddress: container.Config.MacAddress, + NetworkDisabled: container.Config.NetworkDisabled, + ExposedPorts: container.Config.ExposedPorts, + VolumeDriver: container.HostConfig.VolumeDriver, + Memory: container.HostConfig.Memory, + MemorySwap: container.HostConfig.MemorySwap, + CPUShares: container.HostConfig.CPUShares, + CPUSet: container.HostConfig.CpusetCpus, + } + networkSettings := daemon.getBackwardsCompatibleNetworkSettings(container.NetworkSettings) + + return &v1p19.ContainerJSON{ + ContainerJSONBase: base, + Volumes: volumes, + VolumesRW: volumesRW, + Config: config, + NetworkSettings: networkSettings, + }, nil +} + +func addMountPoints(container *container.Container) []types.MountPoint { + mountPoints := make([]types.MountPoint, 0, len(container.MountPoints)) + for _, m := range container.MountPoints { + mountPoints = append(mountPoints, types.MountPoint{ + Name: m.Name, + Source: m.Path(), + Destination: m.Destination, + Driver: m.Driver, + Mode: m.Mode, + RW: m.RW, + Propagation: m.Propagation, + }) + } + return mountPoints +} + +func inspectExecProcessConfig(e *exec.Config) *backend.ExecProcessConfig { + return &backend.ExecProcessConfig{ + Tty: e.Tty, + Entrypoint: e.Entrypoint, + Arguments: e.Args, + Privileged: &e.Privileged, + User: e.User, + } +} diff --git a/vendor/github.com/docker/docker/daemon/kill.go b/vendor/github.com/docker/docker/daemon/kill.go new file mode 100644 index 00000000..3967f0f2 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/kill.go @@ -0,0 +1,153 @@ +package daemon + +import ( + "fmt" + "runtime" + "strings" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/container" + "github.com/docker/docker/pkg/signal" +) + +type errNoSuchProcess struct { + pid int + signal int +} + +func (e errNoSuchProcess) Error() string { + return fmt.Sprintf("Cannot kill process (pid=%d) with signal %d: no such process.", e.pid, e.signal) +} + +// isErrNoSuchProcess returns true if the error +// is an instance of errNoSuchProcess. +func isErrNoSuchProcess(err error) bool { + _, ok := err.(errNoSuchProcess) + return ok +} + +// ContainerKill sends signal to the container +// If no signal is given (sig 0), then Kill with SIGKILL and wait +// for the container to exit. +// If a signal is given, then just send it to the container and return. +func (daemon *Daemon) ContainerKill(name string, sig uint64) error { + container, err := daemon.GetContainer(name) + if err != nil { + return err + } + + if sig != 0 && !signal.ValidSignalForPlatform(syscall.Signal(sig)) { + return fmt.Errorf("The %s daemon does not support signal %d", runtime.GOOS, sig) + } + + // If no signal is passed, or SIGKILL, perform regular Kill (SIGKILL + wait()) + if sig == 0 || syscall.Signal(sig) == syscall.SIGKILL { + return daemon.Kill(container) + } + return daemon.killWithSignal(container, int(sig)) +} + +// killWithSignal sends the container the given signal. 
This wrapper for the
+// host specific kill command prepares the container before attempting
+// to send the signal. An error is returned if the container is paused
+// or not running, or if there is a problem returned from the
+// underlying kill command.
+func (daemon *Daemon) killWithSignal(container *container.Container, sig int) error {
+	logrus.Debugf("Sending %d to %s", sig, container.ID)
+	container.Lock()
+	defer container.Unlock()
+
+	// We could unpause the container for them rather than returning this error
+	if container.Paused {
+		return fmt.Errorf("Container %s is paused. Unpause the container before stopping", container.ID)
+	}
+
+	if !container.Running {
+		return errNotRunning{container.ID}
+	}
+
+	container.ExitOnNext()
+
+	if !daemon.IsShuttingDown() {
+		container.HasBeenManuallyStopped = true
+	}
+
+	// If the container is currently restarting we do not need to send the signal
+	// to the process. Telling the monitor that it should exit on its next event
+	// loop is enough.
+	if container.Restarting {
+		return nil
+	}
+
+	if err := daemon.kill(container, sig); err != nil {
+		err = fmt.Errorf("Cannot kill container %s: %s", container.ID, err)
+		// if the container or process no longer exists, ignore the error
+		if strings.Contains(err.Error(), "container not found") ||
+			strings.Contains(err.Error(), "no such process") {
+			logrus.Warnf("%s", err.Error())
+		} else {
+			return err
+		}
+	}
+
+	attributes := map[string]string{
+		"signal": fmt.Sprintf("%d", sig),
+	}
+	daemon.LogContainerEventWithAttributes(container, "kill", attributes)
+	return nil
+}
+
+// Kill forcefully terminates a container.
+func (daemon *Daemon) Kill(container *container.Container) error {
+	if !container.IsRunning() {
+		return errNotRunning{container.ID}
+	}
+
+	// 1. Send SIGKILL
+	if err := daemon.killPossiblyDeadProcess(container, int(syscall.SIGKILL)); err != nil {
+		// While normally we might "return err" here we're not going to
+		// because if we can't stop the container by this point then
+		// it's probably because it's already stopped. Meaning, between
+		// the time of the IsRunning() call above and now, it stopped.
+		// Also, since the err return will be environment specific we can't
+		// look for any particular (common) error that would indicate
+		// that the process is already dead vs something else going wrong.
+		// So, instead we'll give it up to 2 more seconds to complete and if
+		// by that time the container is still running, then the error
+		// we got is probably valid and so we return it to the caller.
+		if isErrNoSuchProcess(err) {
+			return nil
+		}
+
+		if container.IsRunning() {
+			container.WaitStop(2 * time.Second)
+			if container.IsRunning() {
+				return err
+			}
+		}
+	}
+
+	// 2. Wait for the process to die; as a last resort, try to kill the process directly
+	if err := killProcessDirectly(container); err != nil {
+		if isErrNoSuchProcess(err) {
+			return nil
+		}
+		return err
+	}
+
+	container.WaitStop(-1 * time.Second)
+	return nil
+}
+
+// killPossiblyDeadProcess is a wrapper around killWithSignal(), suppressing "no such process" errors.
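+//
+// Editor's sketch of the translation it performs (illustrative; c is a
+// hypothetical *container.Container):
+//
+//	err := daemon.killPossiblyDeadProcess(c, int(syscall.SIGKILL))
+//	if isErrNoSuchProcess(err) {
+//		// the process was already gone; Kill above treats this as success
+//	}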
+func (daemon *Daemon) killPossiblyDeadProcess(container *container.Container, sig int) error {
+	err := daemon.killWithSignal(container, sig)
+	if err == syscall.ESRCH {
+		e := errNoSuchProcess{container.GetPID(), sig}
+		logrus.Debug(e)
+		return e
+	}
+	return err
+}
diff --git a/vendor/github.com/docker/docker/daemon/links.go b/vendor/github.com/docker/docker/daemon/links.go
new file mode 100644
index 00000000..7f691d4f
--- /dev/null
+++ b/vendor/github.com/docker/docker/daemon/links.go
@@ -0,0 +1,87 @@
+package daemon
+
+import (
+	"sync"
+
+	"github.com/docker/docker/container"
+)
+
+// linkIndex stores link relationships between containers, including their specified alias.
+// The alias is the name the parent uses to reference the child.
+type linkIndex struct {
+	// idx maps a parent->alias->child relationship
+	idx map[*container.Container]map[string]*container.Container
+	// childIdx maps child->parent->aliases
+	childIdx map[*container.Container]map[*container.Container]map[string]struct{}
+	mu       sync.Mutex
+}
+
+func newLinkIndex() *linkIndex {
+	return &linkIndex{
+		idx:      make(map[*container.Container]map[string]*container.Container),
+		childIdx: make(map[*container.Container]map[*container.Container]map[string]struct{}),
+	}
+}
+
+// link adds indexes for the passed in parent/child/alias relationships
+func (l *linkIndex) link(parent, child *container.Container, alias string) {
+	l.mu.Lock()
+
+	if l.idx[parent] == nil {
+		l.idx[parent] = make(map[string]*container.Container)
+	}
+	l.idx[parent][alias] = child
+	if l.childIdx[child] == nil {
+		l.childIdx[child] = make(map[*container.Container]map[string]struct{})
+	}
+	if l.childIdx[child][parent] == nil {
+		l.childIdx[child][parent] = make(map[string]struct{})
+	}
+	l.childIdx[child][parent][alias] = struct{}{}
+
+	l.mu.Unlock()
+}
+
+// unlink removes the requested alias for the given parent/child
+func (l *linkIndex) unlink(alias string, child, parent *container.Container) {
+	l.mu.Lock()
+	delete(l.idx[parent], alias)
+	delete(l.childIdx[child], parent)
+	l.mu.Unlock()
+}
+
+// children maps all the alias->child pairs for the passed in parent;
+// the aliases here are the names the parent uses to refer to the child
+func (l *linkIndex) children(parent *container.Container) map[string]*container.Container {
+	l.mu.Lock()
+	children := l.idx[parent]
+	l.mu.Unlock()
+	return children
+}
+
+// parents maps all the alias->parent pairs for the passed in child;
+// the aliases here are the names the parents use to refer to the child
+func (l *linkIndex) parents(child *container.Container) map[string]*container.Container {
+	l.mu.Lock()
+
+	parents := make(map[string]*container.Container)
+	for parent, aliases := range l.childIdx[child] {
+		for alias := range aliases {
+			parents[alias] = parent
+		}
+	}
+
+	l.mu.Unlock()
+	return parents
+}
+
+// delete deletes all link relationships referencing this container
+func (l *linkIndex) delete(container *container.Container) {
+	l.mu.Lock()
+	for _, child := range l.idx[container] {
+		delete(l.childIdx[child], container)
+	}
+	delete(l.idx, container)
+	delete(l.childIdx, container)
+	l.mu.Unlock()
+}
diff --git a/vendor/github.com/docker/docker/daemon/links/links.go b/vendor/github.com/docker/docker/daemon/links/links.go
new file mode 100644
index 00000000..af15de04
--- /dev/null
+++ b/vendor/github.com/docker/docker/daemon/links/links.go
@@ -0,0 +1,141 @@
+package links
+
+import (
+	"fmt"
+	"path"
+	"strings"
+
+	"github.com/docker/go-connections/nat"
+)
+
+// Link struct holds information about
parent/child linked containers.
+type Link struct {
+	// Parent container IP address
+	ParentIP string
+	// Child container IP address
+	ChildIP string
+	// Link name
+	Name string
+	// Child environment variables
+	ChildEnvironment []string
+	// Child exposed ports
+	Ports []nat.Port
+}
+
+// NewLink initializes a new Link struct with the provided options.
+func NewLink(parentIP, childIP, name string, env []string, exposedPorts map[nat.Port]struct{}) *Link {
+	var (
+		i     int
+		ports = make([]nat.Port, len(exposedPorts))
+	)
+
+	for p := range exposedPorts {
+		ports[i] = p
+		i++
+	}
+
+	return &Link{
+		Name:             name,
+		ChildIP:          childIP,
+		ParentIP:         parentIP,
+		ChildEnvironment: env,
+		Ports:            ports,
+	}
+}
+
+// ToEnv creates a slice of strings containing child container information in
+// the form of environment variables which will later be exported on container
+// startup.
+func (l *Link) ToEnv() []string {
+	env := []string{}
+
+	_, n := path.Split(l.Name)
+	alias := strings.Replace(strings.ToUpper(n), "-", "_", -1)
+
+	if p := l.getDefaultPort(); p != nil {
+		env = append(env, fmt.Sprintf("%s_PORT=%s://%s:%s", alias, p.Proto(), l.ChildIP, p.Port()))
+	}
+
+	// sort the ports so that we can group contiguous ports together
+	nat.Sort(l.Ports, func(ip, jp nat.Port) bool {
+		// If the two ports have the same number, tcp takes priority.
+		// Sort in ascending order.
+		return ip.Int() < jp.Int() || (ip.Int() == jp.Int() && strings.ToLower(ip.Proto()) == "tcp")
+	})
+
+	for i := 0; i < len(l.Ports); {
+		p := l.Ports[i]
+		j := nextContiguous(l.Ports, p.Int(), i)
+		if j > i+1 {
+			env = append(env, fmt.Sprintf("%s_PORT_%s_%s_START=%s://%s:%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Proto(), l.ChildIP, p.Port()))
+			env = append(env, fmt.Sprintf("%s_PORT_%s_%s_ADDR=%s", alias, p.Port(), strings.ToUpper(p.Proto()), l.ChildIP))
+			env = append(env, fmt.Sprintf("%s_PORT_%s_%s_PROTO=%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Proto()))
+			env = append(env, fmt.Sprintf("%s_PORT_%s_%s_PORT_START=%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Port()))
+
+			q := l.Ports[j]
+			env = append(env, fmt.Sprintf("%s_PORT_%s_%s_END=%s://%s:%s", alias, p.Port(), strings.ToUpper(q.Proto()), q.Proto(), l.ChildIP, q.Port()))
+			env = append(env, fmt.Sprintf("%s_PORT_%s_%s_PORT_END=%s", alias, p.Port(), strings.ToUpper(q.Proto()), q.Port()))
+
+			i = j + 1
+			continue
+		} else {
+			i++
+		}
+	}
+	for _, p := range l.Ports {
+		env = append(env, fmt.Sprintf("%s_PORT_%s_%s=%s://%s:%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Proto(), l.ChildIP, p.Port()))
+		env = append(env, fmt.Sprintf("%s_PORT_%s_%s_ADDR=%s", alias, p.Port(), strings.ToUpper(p.Proto()), l.ChildIP))
+		env = append(env, fmt.Sprintf("%s_PORT_%s_%s_PORT=%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Port()))
+		env = append(env, fmt.Sprintf("%s_PORT_%s_%s_PROTO=%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Proto()))
+	}
+
+	// Load the linked container's name into the environment
+	env = append(env, fmt.Sprintf("%s_NAME=%s", alias, l.Name))
+
+	if l.ChildEnvironment != nil {
+		for _, v := range l.ChildEnvironment {
+			parts := strings.SplitN(v, "=", 2)
+			if len(parts) < 2 {
+				continue
+			}
+			// Ignore a few variables that are added during docker build (and not really relevant to linked containers)
+			if parts[0] == "HOME" || parts[0] == "PATH" {
+				continue
+			}
+			env = append(env, fmt.Sprintf("%s_ENV_%s=%s", alias, parts[0], parts[1]))
+		}
+	}
+	return env
+}
+
+func nextContiguous(ports []nat.Port, value int, index int) int {
+	if index+1
== len(ports) {
+		return index
+	}
+	for i := index + 1; i < len(ports); i++ {
+		if ports[i].Int() > value+1 {
+			return i - 1
+		}
+
+		value++
+	}
+	return len(ports) - 1
+}
+
+// getDefaultPort returns the lowest exposed port, preferring tcp on ties.
+func (l *Link) getDefaultPort() *nat.Port {
+	var p nat.Port
+	i := len(l.Ports)
+
+	if i == 0 {
+		return nil
+	} else if i > 1 {
+		nat.Sort(l.Ports, func(ip, jp nat.Port) bool {
+			// If the two ports have the same number, tcp takes priority.
+			// Sort in ascending order.
+			return ip.Int() < jp.Int() || (ip.Int() == jp.Int() && strings.ToLower(ip.Proto()) == "tcp")
+		})
+	}
+	p = l.Ports[0]
+	return &p
+}
diff --git a/vendor/github.com/docker/docker/daemon/list.go b/vendor/github.com/docker/docker/daemon/list.go
new file mode 100644
index 00000000..44ab3cbc
--- /dev/null
+++ b/vendor/github.com/docker/docker/daemon/list.go
@@ -0,0 +1,515 @@
+package daemon
+
+import (
+	"errors"
+	"fmt"
+	"strconv"
+	"strings"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/container"
+	"github.com/docker/docker/image"
+	"github.com/docker/docker/volume"
+	"github.com/docker/engine-api/types"
+	"github.com/docker/engine-api/types/filters"
+	networktypes "github.com/docker/engine-api/types/network"
+	"github.com/docker/go-connections/nat"
+)
+
+var acceptedVolumeFilterTags = map[string]bool{
+	"dangling": true,
+}
+
+var acceptedPsFilterTags = map[string]bool{
+	"ancestor":  true,
+	"before":    true,
+	"exited":    true,
+	"id":        true,
+	"isolation": true,
+	"label":     true,
+	"name":      true,
+	"status":    true,
+	"since":     true,
+	"volume":    true,
+}
+
+// iterationAction represents the possible outcomes during container iteration.
+type iterationAction int
+
+// containerReducer represents a reducer for a container.
+// It returns the object to be serialized by the API.
+type containerReducer func(*container.Container, *listContext) (*types.Container, error)
+
+const (
+	// includeContainer is the action to include a container in the reducer.
+	includeContainer iterationAction = iota
+	// excludeContainer is the action to exclude a container from the reducer.
+	excludeContainer
+	// stopIteration is the action to stop iterating over the list of containers.
+	stopIteration
+)
+
+// errStopIteration makes the iterator stop without returning an error.
+var errStopIteration = errors.New("container list iteration stopped")
+
+// List returns an array of all containers registered in the daemon.
+func (daemon *Daemon) List() []*container.Container {
+	return daemon.containers.List()
+}
+
+// listContext is the daemon generated filtering to iterate over containers.
+// This is created based on the user specification from types.ContainerListOptions.
+type listContext struct {
+	// idx is the container iteration index for this context
+	idx int
+	// ancestorFilter tells whether it should check ancestors or not
+	ancestorFilter bool
+	// names is a list of container names to filter with
+	names map[string][]string
+	// images is a list of images to filter with
+	images map[image.ID]bool
+	// filters is a collection of arguments to filter with, specified by the user
+	filters filters.Args
+	// exitAllowed is a list of exit codes allowed to filter with
+	exitAllowed []int
+
+	// FIXME Remove this for 1.12 as --since and --before are deprecated
+	// beforeContainer is a filter to ignore containers that appear before the one given
+	beforeContainer *container.Container
+	// sinceContainer is a filter to stop the filtering when the iterator arrives at the given container
+	sinceContainer *container.Container
+
+	// beforeFilter is a filter to ignore containers that appear before the one given;
+	// this is used for --filter=before= and --before=, the latter is deprecated.
+	beforeFilter *container.Container
+	// sinceFilter is a filter to stop the filtering when the iterator arrives at the given container;
+	// this is used for --filter=since= and --since=, the latter is deprecated.
+	sinceFilter *container.Container
+	// ContainerListOptions holds the options set by the user
+	*types.ContainerListOptions
+}
+
+// Containers returns the list of containers to show given the user's filtering.
+func (daemon *Daemon) Containers(config *types.ContainerListOptions) ([]*types.Container, error) {
+	return daemon.reduceContainers(config, daemon.transformContainer)
+}
+
+// reduceContainers parses the user's filtering options and generates the list of containers to return based on a reducer.
+func (daemon *Daemon) reduceContainers(config *types.ContainerListOptions, reducer containerReducer) ([]*types.Container, error) {
+	containers := []*types.Container{}
+
+	ctx, err := daemon.foldFilter(config)
+	if err != nil {
+		return nil, err
+	}
+
+	for _, container := range daemon.List() {
+		t, err := daemon.reducePsContainer(container, ctx, reducer)
+		if err != nil {
+			if err != errStopIteration {
+				return nil, err
+			}
+			break
+		}
+		if t != nil {
+			containers = append(containers, t)
+			ctx.idx++
+		}
+	}
+	return containers, nil
+}
+
+// reducePsContainer reduces a container to the representation expected by the ps command.
+func (daemon *Daemon) reducePsContainer(container *container.Container, ctx *listContext, reducer containerReducer) (*types.Container, error) {
+	container.Lock()
+	defer container.Unlock()
+
+	// filter containers to return
+	action := includeContainerInList(container, ctx)
+	switch action {
+	case excludeContainer:
+		return nil, nil
+	case stopIteration:
+		return nil, errStopIteration
+	}
+
+	// transform internal container struct into api structs
+	return reducer(container, ctx)
+}
+
+// foldFilter generates the container filter based on the user's filtering options.
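+//
+// Editor's sketch (hypothetical request): with --filter status=exited and
+// --filter exited=0, the resulting context has exitAllowed == []int{0}, and
+// the status filter forces config.All to true so stopped containers are
+// considered:
+//
+//	ctx, err := daemon.foldFilter(config) // config carries the user's filters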
+func (daemon *Daemon) foldFilter(config *types.ContainerListOptions) (*listContext, error) {
+	psFilters := config.Filter
+
+	if err := psFilters.Validate(acceptedPsFilterTags); err != nil {
+		return nil, err
+	}
+
+	var filtExited []int
+
+	err := psFilters.WalkValues("exited", func(value string) error {
+		code, err := strconv.Atoi(value)
+		if err != nil {
+			return err
+		}
+		filtExited = append(filtExited, code)
+		return nil
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	err = psFilters.WalkValues("status", func(value string) error {
+		if !container.IsValidStateString(value) {
+			return fmt.Errorf("Unrecognised filter value for status: %s", value)
+		}
+
+		config.All = true
+		return nil
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	var beforeContFilter, sinceContFilter *container.Container
+	// FIXME remove this for 1.12 as --since and --before are deprecated
+	var beforeContainer, sinceContainer *container.Container
+
+	err = psFilters.WalkValues("before", func(value string) error {
+		beforeContFilter, err = daemon.GetContainer(value)
+		return err
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	err = psFilters.WalkValues("since", func(value string) error {
+		sinceContFilter, err = daemon.GetContainer(value)
+		return err
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	imagesFilter := map[image.ID]bool{}
+	var ancestorFilter bool
+	if psFilters.Include("ancestor") {
+		ancestorFilter = true
+		psFilters.WalkValues("ancestor", func(ancestor string) error {
+			id, err := daemon.GetImageID(ancestor)
+			if err != nil {
+				logrus.Warnf("Error while looking up image %v", ancestor)
+				return nil
+			}
+			if imagesFilter[id] {
+				// Already seen this ancestor, skip it
+				return nil
+			}
+			// Then walk down the graph and put the imageIds in imagesFilter
+			populateImageFilterByParents(imagesFilter, id, daemon.imageStore.Children)
+			return nil
+		})
+	}
+
+	// FIXME remove this for 1.12 as --since and --before are deprecated
+	if config.Before != "" {
+		beforeContainer, err = daemon.GetContainer(config.Before)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	// FIXME remove this for 1.12 as --since and --before are deprecated
+	if config.Since != "" {
+		sinceContainer, err = daemon.GetContainer(config.Since)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return &listContext{
+		filters:              psFilters,
+		ancestorFilter:       ancestorFilter,
+		images:               imagesFilter,
+		exitAllowed:          filtExited,
+		beforeContainer:      beforeContainer,
+		sinceContainer:       sinceContainer,
+		beforeFilter:         beforeContFilter,
+		sinceFilter:          sinceContFilter,
+		ContainerListOptions: config,
+		names:                daemon.nameIndex.GetAll(),
+	}, nil
+}
+
+// includeContainerInList decides whether a container should be included in the output or not based on the filter.
+// It also decides if the iteration should be stopped or not.
+func includeContainerInList(container *container.Container, ctx *listContext) iterationAction {
+	// Do not include container if it's in the list before the filter container.
+	// Set the filter container to nil to include the rest of containers after this one.
+	if ctx.beforeFilter != nil {
+		if container.ID == ctx.beforeFilter.ID {
+			ctx.beforeFilter = nil
+		}
+		return excludeContainer
+	}
+
+	// Stop iteration when the container arrives at the filter container
+	if ctx.sinceFilter != nil {
+		if container.ID == ctx.sinceFilter.ID {
+			return stopIteration
+		}
+	}
+
+	// Do not include container if it's stopped and we're not filtering
+	// FIXME remove the ctx.beforeContainer and ctx.sinceContainer part of the condition for 1.12 as --since and --before are deprecated
+	if !container.Running && !ctx.All && ctx.Limit <= 0 && ctx.beforeContainer == nil && ctx.sinceContainer == nil {
+		return excludeContainer
+	}
+
+	// Do not include container if the name doesn't match
+	if !ctx.filters.Match("name", container.Name) {
+		return excludeContainer
+	}
+
+	// Do not include container if the id doesn't match
+	if !ctx.filters.Match("id", container.ID) {
+		return excludeContainer
+	}
+
+	// Do not include container if any of the labels don't match
+	if !ctx.filters.MatchKVList("label", container.Config.Labels) {
+		return excludeContainer
+	}
+
+	// Do not include container if isolation doesn't match
+	if excludeContainer == excludeByIsolation(container, ctx) {
+		return excludeContainer
+	}
+
+	// FIXME remove this for 1.12 as --since and --before are deprecated
+	if ctx.beforeContainer != nil {
+		if container.ID == ctx.beforeContainer.ID {
+			ctx.beforeContainer = nil
+		}
+		return excludeContainer
+	}
+
+	// FIXME remove this for 1.12 as --since and --before are deprecated
+	if ctx.sinceContainer != nil {
+		if container.ID == ctx.sinceContainer.ID {
+			return stopIteration
+		}
+	}
+
+	// Stop iteration when the index is over the limit
+	if ctx.Limit > 0 && ctx.idx == ctx.Limit {
+		return stopIteration
+	}
+
+	// Do not include container if its exit code is not in the filter
+	if len(ctx.exitAllowed) > 0 {
+		shouldSkip := true
+		for _, code := range ctx.exitAllowed {
+			if code == container.ExitCode && !container.Running {
+				shouldSkip = false
+				break
+			}
+		}
+		if shouldSkip {
+			return excludeContainer
+		}
+	}
+
+	// Do not include container if its status doesn't match the filter
+	if !ctx.filters.Match("status", container.State.StateString()) {
+		return excludeContainer
+	}
+
+	if ctx.filters.Include("volume") {
+		volumesByName := make(map[string]*volume.MountPoint)
+		for _, m := range container.MountPoints {
+			if m.Name != "" {
+				volumesByName[m.Name] = m
+			} else {
+				volumesByName[m.Source] = m
+			}
+		}
+
+		volumeExist := fmt.Errorf("volume mounted in container")
+		err := ctx.filters.WalkValues("volume", func(value string) error {
+			if _, exist := container.MountPoints[value]; exist {
+				return volumeExist
+			}
+			if _, exist := volumesByName[value]; exist {
+				return volumeExist
+			}
+			return nil
+		})
+		if err != volumeExist {
+			return excludeContainer
+		}
+	}
+
+	if ctx.ancestorFilter {
+		if len(ctx.images) == 0 {
+			return excludeContainer
+		}
+		if !ctx.images[container.ImageID] {
+			return excludeContainer
+		}
+	}
+
+	return includeContainer
+}
+
+// transformContainer generates the container type expected by the docker ps command.
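+//
+// Editor's note (illustrative): arguments containing spaces are re-quoted for
+// the summary Command field, so a container started as /bin/sh -c "echo hi"
+// is reported as:
+//
+//	newC.Command == "/bin/sh -c 'echo hi'"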
+func (daemon *Daemon) transformContainer(container *container.Container, ctx *listContext) (*types.Container, error) { + newC := &types.Container{ + ID: container.ID, + Names: ctx.names[container.ID], + ImageID: container.ImageID.String(), + } + if newC.Names == nil { + // Dead containers will often have no name, so make sure the response isn't null + newC.Names = []string{} + } + + image := container.Config.Image // if possible keep the original ref + if image != container.ImageID.String() { + id, err := daemon.GetImageID(image) + if _, isDNE := err.(ErrImageDoesNotExist); err != nil && !isDNE { + return nil, err + } + if err != nil || id != container.ImageID { + image = container.ImageID.String() + } + } + newC.Image = image + + if len(container.Args) > 0 { + args := []string{} + for _, arg := range container.Args { + if strings.Contains(arg, " ") { + args = append(args, fmt.Sprintf("'%s'", arg)) + } else { + args = append(args, arg) + } + } + argsAsString := strings.Join(args, " ") + + newC.Command = fmt.Sprintf("%s %s", container.Path, argsAsString) + } else { + newC.Command = container.Path + } + newC.Created = container.Created.Unix() + newC.State = container.State.StateString() + newC.Status = container.State.String() + newC.HostConfig.NetworkMode = string(container.HostConfig.NetworkMode) + // copy networks to avoid races + networks := make(map[string]*networktypes.EndpointSettings) + for name, network := range container.NetworkSettings.Networks { + if network == nil { + continue + } + networks[name] = &networktypes.EndpointSettings{ + EndpointID: network.EndpointID, + Gateway: network.Gateway, + IPAddress: network.IPAddress, + IPPrefixLen: network.IPPrefixLen, + IPv6Gateway: network.IPv6Gateway, + GlobalIPv6Address: network.GlobalIPv6Address, + GlobalIPv6PrefixLen: network.GlobalIPv6PrefixLen, + MacAddress: network.MacAddress, + } + if network.IPAMConfig != nil { + networks[name].IPAMConfig = &networktypes.EndpointIPAMConfig{ + IPv4Address: network.IPAMConfig.IPv4Address, + IPv6Address: network.IPAMConfig.IPv6Address, + } + } + } + newC.NetworkSettings = &types.SummaryNetworkSettings{Networks: networks} + + newC.Ports = []types.Port{} + for port, bindings := range container.NetworkSettings.Ports { + p, err := nat.ParsePort(port.Port()) + if err != nil { + return nil, err + } + if len(bindings) == 0 { + newC.Ports = append(newC.Ports, types.Port{ + PrivatePort: p, + Type: port.Proto(), + }) + continue + } + for _, binding := range bindings { + h, err := nat.ParsePort(binding.HostPort) + if err != nil { + return nil, err + } + newC.Ports = append(newC.Ports, types.Port{ + PrivatePort: p, + PublicPort: h, + Type: port.Proto(), + IP: binding.HostIP, + }) + } + } + + if ctx.Size { + sizeRw, sizeRootFs := daemon.getSize(container) + newC.SizeRw = sizeRw + newC.SizeRootFs = sizeRootFs + } + newC.Labels = container.Config.Labels + newC.Mounts = addMountPoints(container) + + return newC, nil +} + +// Volumes lists known volumes, using the filter to restrict the range +// of volumes returned. 
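+//
+// Editor's sketch (the filter string shown is the JSON encoding accepted by
+// filters.FromParam; values illustrative):
+//
+//	vols, warnings, err := daemon.Volumes(`{"dangling":["true"]}`)
+//	// vols then holds only volumes not referenced by any container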
+func (daemon *Daemon) Volumes(filter string) ([]*types.Volume, []string, error) { + var ( + volumesOut []*types.Volume + danglingOnly = false + ) + volFilters, err := filters.FromParam(filter) + if err != nil { + return nil, nil, err + } + + if err := volFilters.Validate(acceptedVolumeFilterTags); err != nil { + return nil, nil, err + } + + if volFilters.Include("dangling") { + if volFilters.ExactMatch("dangling", "true") || volFilters.ExactMatch("dangling", "1") { + danglingOnly = true + } else if !volFilters.ExactMatch("dangling", "false") && !volFilters.ExactMatch("dangling", "0") { + return nil, nil, fmt.Errorf("Invalid filter 'dangling=%s'", volFilters.Get("dangling")) + } + } + + volumes, warnings, err := daemon.volumes.List() + if err != nil { + return nil, nil, err + } + if volFilters.Include("dangling") { + volumes = daemon.volumes.FilterByUsed(volumes, !danglingOnly) + } + for _, v := range volumes { + volumesOut = append(volumesOut, volumeToAPIType(v)) + } + return volumesOut, warnings, nil +} + +func populateImageFilterByParents(ancestorMap map[image.ID]bool, imageID image.ID, getChildren func(image.ID) []image.ID) { + if !ancestorMap[imageID] { + for _, id := range getChildren(imageID) { + populateImageFilterByParents(ancestorMap, id, getChildren) + } + ancestorMap[imageID] = true + } +} diff --git a/vendor/github.com/docker/docker/daemon/list_unix.go b/vendor/github.com/docker/docker/daemon/list_unix.go new file mode 100644 index 00000000..8dccbe4e --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/list_unix.go @@ -0,0 +1,11 @@ +// +build linux freebsd + +package daemon + +import "github.com/docker/docker/container" + +// excludeByIsolation is a platform specific helper function to support PS +// filtering by Isolation. This is a Windows-only concept, so is a no-op on Unix. +func excludeByIsolation(container *container.Container, ctx *listContext) iterationAction { + return includeContainer +} diff --git a/vendor/github.com/docker/docker/daemon/logdrivers_linux.go b/vendor/github.com/docker/docker/daemon/logdrivers_linux.go new file mode 100644 index 00000000..568770e0 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logdrivers_linux.go @@ -0,0 +1,8 @@ +package daemon + +import ( + // Importing packages here only to make sure their init gets called and + // therefore they register themselves to the logdriver factory. + _ "github.com/docker/docker/daemon/logger/jsonfilelog" + _ "github.com/docker/docker/daemon/logger/syslog" +) diff --git a/vendor/github.com/docker/docker/daemon/logger/context.go b/vendor/github.com/docker/docker/daemon/logger/context.go new file mode 100644 index 00000000..eb54c311 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/context.go @@ -0,0 +1,112 @@ +package logger + +import ( + "fmt" + "os" + "strings" + "time" +) + +// Context provides enough information for a logging driver to do its function. +type Context struct { + Config map[string]string + ContainerID string + ContainerName string + ContainerEntrypoint string + ContainerArgs []string + ContainerImageID string + ContainerImageName string + ContainerCreated time.Time + ContainerEnv []string + ContainerLabels map[string]string + LogPath string +} + +// ExtraAttributes returns the user-defined extra attributes (labels, +// environment variables) in key-value format. This can be used by log drivers +// that support metadata to add more context to a log. 
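+//
+// Editor's sketch (label name and value are hypothetical):
+//
+//	// ctx.Config["labels"] == "com.example.env" and the container carries
+//	// that label with value "prod":
+//	attrs := ctx.ExtraAttributes(strings.ToUpper)
+//	// attrs == map[string]string{"COM.EXAMPLE.ENV": "prod"}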
+func (ctx *Context) ExtraAttributes(keyMod func(string) string) map[string]string {
+	extra := make(map[string]string)
+	labels, ok := ctx.Config["labels"]
+	if ok && len(labels) > 0 {
+		for _, l := range strings.Split(labels, ",") {
+			if v, ok := ctx.ContainerLabels[l]; ok {
+				if keyMod != nil {
+					l = keyMod(l)
+				}
+				extra[l] = v
+			}
+		}
+	}
+
+	env, ok := ctx.Config["env"]
+	if ok && len(env) > 0 {
+		envMapping := make(map[string]string)
+		for _, e := range ctx.ContainerEnv {
+			if kv := strings.SplitN(e, "=", 2); len(kv) == 2 {
+				envMapping[kv[0]] = kv[1]
+			}
+		}
+		for _, l := range strings.Split(env, ",") {
+			if v, ok := envMapping[l]; ok {
+				if keyMod != nil {
+					l = keyMod(l)
+				}
+				extra[l] = v
+			}
+		}
+	}
+
+	return extra
+}
+
+// Hostname returns the hostname from the underlying OS.
+func (ctx *Context) Hostname() (string, error) {
+	hostname, err := os.Hostname()
+	if err != nil {
+		return "", fmt.Errorf("logger: cannot resolve hostname: %v", err)
+	}
+	return hostname, nil
+}
+
+// Command returns the command that the container being logged was
+// started with. The Entrypoint is prepended to the container
+// arguments.
+func (ctx *Context) Command() string {
+	terms := []string{ctx.ContainerEntrypoint}
+	for _, arg := range ctx.ContainerArgs {
+		terms = append(terms, arg)
+	}
+	command := strings.Join(terms, " ")
+	return command
+}
+
+// ID returns the Container ID shortened to 12 characters.
+func (ctx *Context) ID() string {
+	return ctx.ContainerID[:12]
+}
+
+// FullID is an alias of ContainerID.
+func (ctx *Context) FullID() string {
+	return ctx.ContainerID
+}
+
+// Name returns the ContainerName without a preceding '/'.
+func (ctx *Context) Name() string {
+	return ctx.ContainerName[1:]
+}
+
+// ImageID returns the ContainerImageID shortened to 12 characters.
+func (ctx *Context) ImageID() string {
+	return ctx.ContainerImageID[:12]
+}
+
+// ImageFullID is an alias of ContainerImageID.
+func (ctx *Context) ImageFullID() string {
+	return ctx.ContainerImageID
+}
+
+// ImageName is an alias of ContainerImageName.
+func (ctx *Context) ImageName() string {
+	return ctx.ContainerImageName
+}
diff --git a/vendor/github.com/docker/docker/daemon/logger/copier.go b/vendor/github.com/docker/docker/daemon/logger/copier.go
new file mode 100644
index 00000000..436c0a8f
--- /dev/null
+++ b/vendor/github.com/docker/docker/daemon/logger/copier.go
@@ -0,0 +1,86 @@
+package logger
+
+import (
+	"bufio"
+	"bytes"
+	"io"
+	"sync"
+	"time"
+
+	"github.com/Sirupsen/logrus"
+)
+
+// Copier can copy logs from specified sources to Logger and attach
+// ContainerID and Timestamp.
+// Writes are concurrent, so you need to implement some synchronization in your logger.
+type Copier struct {
+	// cid is the container id for which we are copying logs
+	cid string
+	// srcs is a map of name -> reader pairs, for example "stdout", "stderr"
+	srcs     map[string]io.Reader
+	dst      Logger
+	copyJobs sync.WaitGroup
+	closed   chan struct{}
+}
+
+// NewCopier creates a new Copier
+func NewCopier(cid string, srcs map[string]io.Reader, dst Logger) *Copier {
+	return &Copier{
+		cid:    cid,
+		srcs:   srcs,
+		dst:    dst,
+		closed: make(chan struct{}),
+	}
+}
+
+// Run starts copying logs
+func (c *Copier) Run() {
+	for src, w := range c.srcs {
+		c.copyJobs.Add(1)
+		go c.copySrc(src, w)
+	}
+}
+
+func (c *Copier) copySrc(name string, src io.Reader) {
+	defer c.copyJobs.Done()
+	reader := bufio.NewReader(src)
+
+	for {
+		select {
+		case <-c.closed:
+			return
+		default:
+			line, err := reader.ReadBytes('\n')
+			line = bytes.TrimSuffix(line, []byte{'\n'})
+
+			// ReadBytes can return full or partial output even when it failed.
+			// e.g. it can return a full entry and EOF.
+			if err == nil || len(line) > 0 {
+				if logErr := c.dst.Log(&Message{ContainerID: c.cid, Line: line, Source: name, Timestamp: time.Now().UTC()}); logErr != nil {
+					logrus.Errorf("Failed to log msg %q for logger %s: %s", line, c.dst.Name(), logErr)
+				}
+			}
+
+			if err != nil {
+				if err != io.EOF {
+					logrus.Errorf("Error scanning log stream: %s", err)
+				}
+				return
+			}
+		}
+	}
+}
+
+// Wait waits until all copying is done
+func (c *Copier) Wait() {
+	c.copyJobs.Wait()
+}
+
+// Close closes the copier
+func (c *Copier) Close() {
+	select {
+	case <-c.closed:
+	default:
+		close(c.closed)
+	}
+}
diff --git a/vendor/github.com/docker/docker/daemon/logger/factory.go b/vendor/github.com/docker/docker/daemon/logger/factory.go
new file mode 100644
index 00000000..5ec0f673
--- /dev/null
+++ b/vendor/github.com/docker/docker/daemon/logger/factory.go
@@ -0,0 +1,89 @@
+package logger
+
+import (
+	"fmt"
+	"sync"
+)
+
+// Creator builds a logging driver instance with the given context.
+type Creator func(Context) (Logger, error)
+
+// LogOptValidator checks the options specific to the underlying
+// logging implementation.
+type LogOptValidator func(cfg map[string]string) error
+
+type logdriverFactory struct {
+	registry     map[string]Creator
+	optValidator map[string]LogOptValidator
+	m            sync.Mutex
+}
+
+func (lf *logdriverFactory) register(name string, c Creator) error {
+	lf.m.Lock()
+	defer lf.m.Unlock()
+
+	if _, ok := lf.registry[name]; ok {
+		return fmt.Errorf("logger: log driver named '%s' is already registered", name)
+	}
+	lf.registry[name] = c
+	return nil
+}
+
+func (lf *logdriverFactory) registerLogOptValidator(name string, l LogOptValidator) error {
+	lf.m.Lock()
+	defer lf.m.Unlock()
+
+	if _, ok := lf.optValidator[name]; ok {
+		return fmt.Errorf("logger: log validator named '%s' is already registered", name)
+	}
+	lf.optValidator[name] = l
+	return nil
+}
+
+func (lf *logdriverFactory) get(name string) (Creator, error) {
+	lf.m.Lock()
+	defer lf.m.Unlock()
+
+	c, ok := lf.registry[name]
+	if !ok {
+		return c, fmt.Errorf("logger: no log driver named '%s' is registered", name)
+	}
+	return c, nil
+}
+
+func (lf *logdriverFactory) getLogOptValidator(name string) LogOptValidator {
+	lf.m.Lock()
+	defer lf.m.Unlock()
+
+	c, _ := lf.optValidator[name]
+	return c
+}
+
+var factory = &logdriverFactory{registry: make(map[string]Creator), optValidator: make(map[string]LogOptValidator)} // global factory instance
+
+// RegisterLogDriver registers the given logging driver builder with given logging
+// driver name.
+func RegisterLogDriver(name string, c Creator) error {
+	return factory.register(name, c)
+}
+
+// RegisterLogOptValidator registers the logging option validator with
+// the given logging driver name.
+func RegisterLogOptValidator(name string, l LogOptValidator) error {
+	return factory.registerLogOptValidator(name, l)
+}
+
+// GetLogDriver provides the logging driver builder for a logging driver name.
+func GetLogDriver(name string) (Creator, error) {
+	return factory.get(name)
+}
+
+// ValidateLogOpts checks the options for the given log driver. The
+// options supported are specific to the LogDriver implementation.
+func ValidateLogOpts(name string, cfg map[string]string) error {
+	l := factory.getLogOptValidator(name)
+	if l != nil {
+		return l(cfg)
+	}
+	return nil
+}
diff --git a/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonfilelog.go b/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonfilelog.go
new file mode 100644
index 00000000..9faa4e02
--- /dev/null
+++ b/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonfilelog.go
@@ -0,0 +1,147 @@
+// Package jsonfilelog provides the default Logger implementation for
+// Docker logging. This logger logs to files on the host server in the
+// JSON format.
+package jsonfilelog
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"strconv"
+	"sync"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/daemon/logger"
+	"github.com/docker/docker/daemon/logger/loggerutils"
+	"github.com/docker/docker/pkg/jsonlog"
+	"github.com/docker/go-units"
+)
+
+// Name is the name of the json-file logging driver.
+const Name = "json-file"
+
+// JSONFileLogger is the Logger implementation for default Docker logging.
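+//
+// Editor's sketch: instances are normally obtained through the factory that
+// init below registers with (illustrative):
+//
+//	creator, _ := logger.GetLogDriver("json-file")
+//	l, err := creator(ctx) // ctx is a logger.Context with LogPath set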
+type JSONFileLogger struct {
+	buf     *bytes.Buffer
+	writer  *loggerutils.RotateFileWriter
+	mu      sync.Mutex
+	readers map[*logger.LogWatcher]struct{} // stores the active log followers
+	extra   []byte                          // json-encoded extra attributes
+}
+
+func init() {
+	if err := logger.RegisterLogDriver(Name, New); err != nil {
+		logrus.Fatal(err)
+	}
+	if err := logger.RegisterLogOptValidator(Name, ValidateLogOpt); err != nil {
+		logrus.Fatal(err)
+	}
+}
+
+// New creates a new JSONFileLogger which writes to the filename passed in
+// on the given context.
+func New(ctx logger.Context) (logger.Logger, error) {
+	var capval int64 = -1
+	if capacity, ok := ctx.Config["max-size"]; ok {
+		var err error
+		capval, err = units.FromHumanSize(capacity)
+		if err != nil {
+			return nil, err
+		}
+	}
+	var maxFiles = 1
+	if maxFileString, ok := ctx.Config["max-file"]; ok {
+		var err error
+		maxFiles, err = strconv.Atoi(maxFileString)
+		if err != nil {
+			return nil, err
+		}
+		if maxFiles < 1 {
+			return nil, fmt.Errorf("max-file cannot be less than 1")
+		}
+	}
+
+	writer, err := loggerutils.NewRotateFileWriter(ctx.LogPath, capval, maxFiles)
+	if err != nil {
+		return nil, err
+	}
+
+	var extra []byte
+	if attrs := ctx.ExtraAttributes(nil); len(attrs) > 0 {
+		var err error
+		extra, err = json.Marshal(attrs)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return &JSONFileLogger{
+		buf:     bytes.NewBuffer(nil),
+		writer:  writer,
+		readers: make(map[*logger.LogWatcher]struct{}),
+		extra:   extra,
+	}, nil
+}
+
+// Log converts logger.Message to jsonlog.JSONLog and serializes it to file.
+func (l *JSONFileLogger) Log(msg *logger.Message) error {
+	timestamp, err := jsonlog.FastTimeMarshalJSON(msg.Timestamp)
+	if err != nil {
+		return err
+	}
+	l.mu.Lock()
+	err = (&jsonlog.JSONLogs{
+		Log:      append(msg.Line, '\n'),
+		Stream:   msg.Source,
+		Created:  timestamp,
+		RawAttrs: l.extra,
+	}).MarshalJSONBuf(l.buf)
+	if err != nil {
+		l.mu.Unlock()
+		return err
+	}
+
+	l.buf.WriteByte('\n')
+	_, err = l.writer.Write(l.buf.Bytes())
+	l.buf.Reset()
+	l.mu.Unlock()
+
+	return err
+}
+
+// ValidateLogOpt looks for the json-file specific log options
+// max-file, max-size, labels, and env.
+func ValidateLogOpt(cfg map[string]string) error {
+	for key := range cfg {
+		switch key {
+		case "max-file":
+		case "max-size":
+		case "labels":
+		case "env":
+		default:
+			return fmt.Errorf("unknown log opt '%s' for json-file log driver", key)
+		}
+	}
+	return nil
+}
+
+// LogPath returns the location the given json logger logs to.
+func (l *JSONFileLogger) LogPath() string {
+	return l.writer.LogPath()
+}
+
+// Close closes underlying file and signals all readers to stop.
+func (l *JSONFileLogger) Close() error {
+	l.mu.Lock()
+	err := l.writer.Close()
+	for r := range l.readers {
+		r.Close()
+		delete(l.readers, r)
+	}
+	l.mu.Unlock()
+	return err
+}
+
+// Name returns the name of this logger.
+func (l *JSONFileLogger) Name() string { + return Name +} diff --git a/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/read.go b/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/read.go new file mode 100644 index 00000000..0c8fb5e5 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/read.go @@ -0,0 +1,235 @@ +package jsonfilelog + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "os" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/pkg/filenotify" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/jsonlog" + "github.com/docker/docker/pkg/tailfile" +) + +const maxJSONDecodeRetry = 20000 + +func decodeLogLine(dec *json.Decoder, l *jsonlog.JSONLog) (*logger.Message, error) { + l.Reset() + if err := dec.Decode(l); err != nil { + return nil, err + } + msg := &logger.Message{ + Source: l.Stream, + Timestamp: l.Created, + Line: []byte(l.Log), + } + return msg, nil +} + +// ReadLogs implements the logger's LogReader interface for the logs +// created by this driver. +func (l *JSONFileLogger) ReadLogs(config logger.ReadConfig) *logger.LogWatcher { + logWatcher := logger.NewLogWatcher() + + go l.readLogs(logWatcher, config) + return logWatcher +} + +func (l *JSONFileLogger) readLogs(logWatcher *logger.LogWatcher, config logger.ReadConfig) { + defer close(logWatcher.Msg) + + pth := l.writer.LogPath() + var files []io.ReadSeeker + for i := l.writer.MaxFiles(); i > 1; i-- { + f, err := os.Open(fmt.Sprintf("%s.%d", pth, i-1)) + if err != nil { + if !os.IsNotExist(err) { + logWatcher.Err <- err + break + } + continue + } + files = append(files, f) + } + + latestFile, err := os.Open(pth) + if err != nil { + logWatcher.Err <- err + return + } + + if config.Tail != 0 { + tailer := ioutils.MultiReadSeeker(append(files, latestFile)...) 
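+		// Editor's note: the rotated files plus the current file are stitched
+		// into one seekable stream so the tail count can span rotations.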
+ tailFile(tailer, logWatcher, config.Tail, config.Since) + } + + // close all the rotated files + for _, f := range files { + if err := f.(io.Closer).Close(); err != nil { + logrus.WithField("logger", "json-file").Warnf("error closing tailed log file: %v", err) + } + } + + if !config.Follow { + return + } + + if config.Tail >= 0 { + latestFile.Seek(0, os.SEEK_END) + } + + l.mu.Lock() + l.readers[logWatcher] = struct{}{} + l.mu.Unlock() + + notifyRotate := l.writer.NotifyRotate() + followLogs(latestFile, logWatcher, notifyRotate, config.Since) + + l.mu.Lock() + delete(l.readers, logWatcher) + l.mu.Unlock() + + l.writer.NotifyRotateEvict(notifyRotate) +} + +func tailFile(f io.ReadSeeker, logWatcher *logger.LogWatcher, tail int, since time.Time) { + var rdr io.Reader = f + if tail > 0 { + ls, err := tailfile.TailFile(f, tail) + if err != nil { + logWatcher.Err <- err + return + } + rdr = bytes.NewBuffer(bytes.Join(ls, []byte("\n"))) + } + dec := json.NewDecoder(rdr) + l := &jsonlog.JSONLog{} + for { + msg, err := decodeLogLine(dec, l) + if err != nil { + if err != io.EOF { + logWatcher.Err <- err + } + return + } + if !since.IsZero() && msg.Timestamp.Before(since) { + continue + } + logWatcher.Msg <- msg + } +} + +func followLogs(f *os.File, logWatcher *logger.LogWatcher, notifyRotate chan interface{}, since time.Time) { + dec := json.NewDecoder(f) + l := &jsonlog.JSONLog{} + + fileWatcher, err := filenotify.New() + if err != nil { + logWatcher.Err <- err + } + defer func() { + f.Close() + fileWatcher.Close() + }() + name := f.Name() + + if err := fileWatcher.Add(name); err != nil { + logrus.WithField("logger", "json-file").Warnf("falling back to file poller due to error: %v", err) + fileWatcher.Close() + fileWatcher = filenotify.NewPollingWatcher() + + if err := fileWatcher.Add(name); err != nil { + logrus.Debugf("error watching log file for modifications: %v", err) + logWatcher.Err <- err + return + } + } + + var retries int + for { + msg, err := decodeLogLine(dec, l) + if err != nil { + if err != io.EOF { + // try again because this shouldn't happen + if _, ok := err.(*json.SyntaxError); ok && retries <= maxJSONDecodeRetry { + dec = json.NewDecoder(f) + retries++ + continue + } + + // io.ErrUnexpectedEOF is returned from json.Decoder when there is + // remaining data in the parser's buffer while an io.EOF occurs. + // If the json logger writes a partial json log entry to the disk + // while at the same time the decoder tries to decode it, the race condition happens. 
+				if err == io.ErrUnexpectedEOF && retries <= maxJSONDecodeRetry {
+					reader := io.MultiReader(dec.Buffered(), f)
+					dec = json.NewDecoder(reader)
+					retries++
+					continue
+				}
+
+				return
+			}
+
+			select {
+			case <-fileWatcher.Events():
+				dec = json.NewDecoder(f)
+				continue
+			case <-fileWatcher.Errors():
+				logWatcher.Err <- err
+				return
+			case <-logWatcher.WatchClose():
+				fileWatcher.Remove(name)
+				return
+			case <-notifyRotate:
+				f.Close()
+				fileWatcher.Remove(name)
+
+				// retry when the file doesn't exist
+				for retries := 0; retries <= 5; retries++ {
+					f, err = os.Open(name)
+					if err == nil || !os.IsNotExist(err) {
+						break
+					}
+				}
+
+				if err = fileWatcher.Add(name); err != nil {
+					logWatcher.Err <- err
+					return
+				}
+				if err != nil {
+					logWatcher.Err <- err
+					return
+				}
+
+				dec = json.NewDecoder(f)
+				continue
+			}
+		}
+
+		retries = 0 // reset retries since we've succeeded
+		if !since.IsZero() && msg.Timestamp.Before(since) {
+			continue
+		}
+		select {
+		case logWatcher.Msg <- msg:
+		case <-logWatcher.WatchClose():
+			logWatcher.Msg <- msg
+			for {
+				msg, err := decodeLogLine(dec, l)
+				if err != nil {
+					return
+				}
+				if !since.IsZero() && msg.Timestamp.Before(since) {
+					continue
+				}
+				logWatcher.Msg <- msg
+			}
+		}
+	}
+}
diff --git a/vendor/github.com/docker/docker/daemon/logger/logger.go b/vendor/github.com/docker/docker/daemon/logger/logger.go
new file mode 100644
index 00000000..cf8d571f
--- /dev/null
+++ b/vendor/github.com/docker/docker/daemon/logger/logger.go
@@ -0,0 +1,87 @@
+// Package logger defines interfaces that logger drivers implement to
+// log messages.
+//
+// The other half of a logger driver is the implementation of the
+// factory, which holds the contextual instance information that
+// allows multiple loggers of the same type to perform different
+// actions, such as logging to different locations.
+package logger
+
+import (
+	"errors"
+	"time"
+
+	"github.com/docker/docker/pkg/jsonlog"
+)
+
+// ErrReadLogsNotSupported is returned when the logger does not support reading logs.
+var ErrReadLogsNotSupported = errors.New("configured logging reader does not support reading")
+
+const (
+	// TimeFormat is the time format used for timestamps sent to log readers.
+	TimeFormat           = jsonlog.RFC3339NanoFixed
+	logWatcherBufferSize = 4096
+)
+
+// Message is the data structure that represents a record from some container.
+type Message struct {
+	ContainerID string
+	Line        []byte
+	Source      string
+	Timestamp   time.Time
+}
+
+// Logger is the interface for docker logging drivers.
+type Logger interface {
+	Log(*Message) error
+	Name() string
+	Close() error
+}
+
+// ReadConfig is the configuration passed into ReadLogs.
+type ReadConfig struct {
+	Since  time.Time
+	Tail   int
+	Follow bool
+}
+
+// LogReader is the interface for reading log messages for loggers that support reading.
+type LogReader interface {
+	// Read logs from underlying logging backend
+	ReadLogs(ReadConfig) *LogWatcher
+}
+
+// LogWatcher is used when consuming logs read from the LogReader interface.
+type LogWatcher struct {
+	// For sending log messages to a reader.
+	Msg chan *Message
+	// For sending error messages that occur while reading logs.
+	Err           chan error
+	closeNotifier chan struct{}
+}
+
+// NewLogWatcher returns a new LogWatcher.
+func NewLogWatcher() *LogWatcher {
+	return &LogWatcher{
+		Msg:           make(chan *Message, logWatcherBufferSize),
+		Err:           make(chan error, 1),
+		closeNotifier: make(chan struct{}),
+	}
+}
+
+// Close notifies the underlying log reader to stop.
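+//
+// Editor's sketch of a typical consumer (illustrative; lr is a LogReader):
+//
+//	w := lr.ReadLogs(ReadConfig{Tail: 100, Follow: true})
+//	defer w.Close()
+//	for {
+//		select {
+//		case msg := <-w.Msg:
+//			_ = msg // process the log message
+//		case err := <-w.Err:
+//			_ = err // handle the read error
+//		}
+//	}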
+func (w *LogWatcher) Close() {
+	// only close if not already closed
+	select {
+	case <-w.closeNotifier:
+	default:
+		close(w.closeNotifier)
+	}
+}
+
+// WatchClose returns a channel receiver that receives notification
+// when the watcher has been closed. This should only be called from
+// one goroutine.
+func (w *LogWatcher) WatchClose() <-chan struct{} {
+	return w.closeNotifier
+}
diff --git a/vendor/github.com/docker/docker/daemon/logger/loggerutils/log_tag.go b/vendor/github.com/docker/docker/daemon/logger/loggerutils/log_tag.go
new file mode 100644
index 00000000..6653b9c4
--- /dev/null
+++ b/vendor/github.com/docker/docker/daemon/logger/loggerutils/log_tag.go
@@ -0,0 +1,46 @@
+package loggerutils
+
+import (
+	"bytes"
+	"fmt"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/daemon/logger"
+	"github.com/docker/docker/utils/templates"
+)
+
+// ParseLogTag generates a context aware tag for consistency across different
+// log drivers based on the context of the running container.
+func ParseLogTag(ctx logger.Context, defaultTemplate string) (string, error) {
+	tagTemplate := lookupTagTemplate(ctx, defaultTemplate)
+
+	tmpl, err := templates.NewParse("log-tag", tagTemplate)
+	if err != nil {
+		return "", err
+	}
+	buf := new(bytes.Buffer)
+	if err := tmpl.Execute(buf, &ctx); err != nil {
+		return "", err
+	}
+
+	return buf.String(), nil
+}
+
+func lookupTagTemplate(ctx logger.Context, defaultTemplate string) string {
+	tagTemplate := ctx.Config["tag"]
+
+	deprecatedConfigs := []string{"syslog-tag", "gelf-tag", "fluentd-tag"}
+	for i := 0; tagTemplate == "" && i < len(deprecatedConfigs); i++ {
+		cfg := deprecatedConfigs[i]
+		if ctx.Config[cfg] != "" {
+			tagTemplate = ctx.Config[cfg]
+			logrus.Warn(fmt.Sprintf("Using log tag from deprecated log-opt '%s'. Please use: --log-opt tag=\"%s\"", cfg, tagTemplate))
+		}
+	}
+
+	if tagTemplate == "" {
+		tagTemplate = defaultTemplate
+	}
+
+	return tagTemplate
+}
diff --git a/vendor/github.com/docker/docker/daemon/logger/loggerutils/rotatefilewriter.go b/vendor/github.com/docker/docker/daemon/logger/loggerutils/rotatefilewriter.go
new file mode 100644
index 00000000..99e0964a
--- /dev/null
+++ b/vendor/github.com/docker/docker/daemon/logger/loggerutils/rotatefilewriter.go
@@ -0,0 +1,124 @@
+package loggerutils
+
+import (
+	"os"
+	"strconv"
+	"sync"
+
+	"github.com/docker/docker/pkg/pubsub"
+)
+
+// RotateFileWriter is an io.Writer that writes log messages to a file and
+// rotates the file when it reaches its maximum size.
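+//
+// Editor's sketch (hypothetical path and limits): keep up to three 10 MB
+// files, renaming <path> to <path>.1, <path>.1 to <path>.2, and so on:
+//
+//	w, err := NewRotateFileWriter("/var/log/c.log", 10*1024*1024, 3)
+//	if err == nil {
+//		w.Write([]byte(`{"log":"hello\n"}`))
+//	}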
+type RotateFileWriter struct {
+	f            *os.File // store for closing
+	mu           sync.Mutex
+	capacity     int64 // maximum size of each file
+	currentSize  int64 // current size of the latest file
+	maxFiles     int   // maximum number of files
+	notifyRotate *pubsub.Publisher
+}
+
+// NewRotateFileWriter creates a new RotateFileWriter.
+func NewRotateFileWriter(logPath string, capacity int64, maxFiles int) (*RotateFileWriter, error) {
+	log, err := os.OpenFile(logPath, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0640)
+	if err != nil {
+		return nil, err
+	}
+
+	size, err := log.Seek(0, os.SEEK_END)
+	if err != nil {
+		return nil, err
+	}
+
+	return &RotateFileWriter{
+		f:            log,
+		capacity:     capacity,
+		currentSize:  size,
+		maxFiles:     maxFiles,
+		notifyRotate: pubsub.NewPublisher(0, 1),
+	}, nil
+}
+
+// Write writes the log message to the file, rotating the file first if it is
+// at capacity.
+func (w *RotateFileWriter) Write(message []byte) (int, error) {
+	w.mu.Lock()
+	if err := w.checkCapacityAndRotate(); err != nil {
+		w.mu.Unlock()
+		return -1, err
+	}
+
+	n, err := w.f.Write(message)
+	if err == nil {
+		w.currentSize += int64(n)
+	}
+	w.mu.Unlock()
+	return n, err
+}
+
+func (w *RotateFileWriter) checkCapacityAndRotate() error {
+	if w.capacity == -1 {
+		return nil
+	}
+
+	if w.currentSize >= w.capacity {
+		name := w.f.Name()
+		if err := w.f.Close(); err != nil {
+			return err
+		}
+		if err := rotate(name, w.maxFiles); err != nil {
+			return err
+		}
+		file, err := os.OpenFile(name, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0640)
+		if err != nil {
+			return err
+		}
+		w.f = file
+		w.currentSize = 0
+		w.notifyRotate.Publish(struct{}{})
+	}
+
+	return nil
+}
+
+func rotate(name string, maxFiles int) error {
+	if maxFiles < 2 {
+		return nil
+	}
+	for i := maxFiles - 1; i > 1; i-- {
+		toPath := name + "." + strconv.Itoa(i)
+		fromPath := name + "." + strconv.Itoa(i-1)
+		if err := os.Rename(fromPath, toPath); err != nil && !os.IsNotExist(err) {
+			return err
+		}
+	}
+
+	if err := os.Rename(name, name+".1"); err != nil && !os.IsNotExist(err) {
+		return err
+	}
+	return nil
+}
+
+// LogPath returns the location the given writer logs to.
+func (w *RotateFileWriter) LogPath() string {
+	return w.f.Name()
+}
+
+// MaxFiles returns the maximum number of files.
+func (w *RotateFileWriter) MaxFiles() int {
+	return w.maxFiles
+}
+
+// NotifyRotate returns a new subscriber channel.
+func (w *RotateFileWriter) NotifyRotate() chan interface{} {
+	return w.notifyRotate.Subscribe()
+}
+
+// NotifyRotateEvict removes the specified subscriber from receiving any more messages.
+func (w *RotateFileWriter) NotifyRotateEvict(sub chan interface{}) {
+	w.notifyRotate.Evict(sub)
+}
+
+// Close closes the underlying file.
+func (w *RotateFileWriter) Close() error {
+	return w.f.Close()
+}
diff --git a/vendor/github.com/docker/docker/daemon/logger/syslog/syslog.go b/vendor/github.com/docker/docker/daemon/logger/syslog/syslog.go
new file mode 100644
index 00000000..99e03278
--- /dev/null
+++ b/vendor/github.com/docker/docker/daemon/logger/syslog/syslog.go
@@ -0,0 +1,247 @@
+// +build linux
+
+// Package syslog provides the logdriver for forwarding server logs to syslog endpoints.
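+//
+// Editor's sketch of typical daemon-side configuration (values illustrative):
+//
+//	docker run --log-driver=syslog \
+//		--log-opt syslog-address=tcp://192.168.0.42:514 \
+//		--log-opt syslog-facility=daemon \
+//		--log-opt tag="{{.ID}}"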
+package syslog
+
+import (
+	"crypto/tls"
+	"errors"
+	"fmt"
+	"net"
+	"net/url"
+	"os"
+	"path"
+	"strconv"
+	"strings"
+	"time"
+
+	syslog "github.com/RackSec/srslog"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/daemon/logger"
+	"github.com/docker/docker/daemon/logger/loggerutils"
+	"github.com/docker/docker/pkg/urlutil"
+	"github.com/docker/go-connections/tlsconfig"
+)
+
+const (
+	name        = "syslog"
+	secureProto = "tcp+tls"
+)
+
+var facilities = map[string]syslog.Priority{
+	"kern":     syslog.LOG_KERN,
+	"user":     syslog.LOG_USER,
+	"mail":     syslog.LOG_MAIL,
+	"daemon":   syslog.LOG_DAEMON,
+	"auth":     syslog.LOG_AUTH,
+	"syslog":   syslog.LOG_SYSLOG,
+	"lpr":      syslog.LOG_LPR,
+	"news":     syslog.LOG_NEWS,
+	"uucp":     syslog.LOG_UUCP,
+	"cron":     syslog.LOG_CRON,
+	"authpriv": syslog.LOG_AUTHPRIV,
+	"ftp":      syslog.LOG_FTP,
+	"local0":   syslog.LOG_LOCAL0,
+	"local1":   syslog.LOG_LOCAL1,
+	"local2":   syslog.LOG_LOCAL2,
+	"local3":   syslog.LOG_LOCAL3,
+	"local4":   syslog.LOG_LOCAL4,
+	"local5":   syslog.LOG_LOCAL5,
+	"local6":   syslog.LOG_LOCAL6,
+	"local7":   syslog.LOG_LOCAL7,
+}
+
+type syslogger struct {
+	writer *syslog.Writer
+}
+
+func init() {
+	if err := logger.RegisterLogDriver(name, New); err != nil {
+		logrus.Fatal(err)
+	}
+	if err := logger.RegisterLogOptValidator(name, ValidateLogOpt); err != nil {
+		logrus.Fatal(err)
+	}
+}
+
+// rsyslog uses the appname part of the syslog message to fill in the %syslogtag%
+// template attribute in rsyslog.conf. In order to be backward compatible with
+// RFC 3164, the tag will also be used as the appname.
+func rfc5424formatterWithAppNameAsTag(p syslog.Priority, hostname, tag, content string) string {
+	timestamp := time.Now().Format(time.RFC3339)
+	pid := os.Getpid()
+	msg := fmt.Sprintf("<%d>%d %s %s %s %d %s %s",
+		p, 1, timestamp, hostname, tag, pid, tag, content)
+	return msg
+}
+
+// New creates a syslog logger using the configuration passed in on
+// the context. Supported context configuration variables are
+// syslog-address, syslog-facility, syslog-format, & syslog-tag.
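+//
+// Editor's illustration: with syslog-format=rfc5424 the formatter above
+// produces messages shaped like
+//
+//	<30>1 2016-06-01T01:42:22Z myhost docker/abc123def456 784 docker/abc123def456 hello
+//
+// where 30 is facility daemon (24) plus severity info (6); the host, tag,
+// and pid are hypothetical.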
+func New(ctx logger.Context) (logger.Logger, error) { + tag, err := loggerutils.ParseLogTag(ctx, "{{.ID}}") + if err != nil { + return nil, err + } + + proto, address, err := parseAddress(ctx.Config["syslog-address"]) + if err != nil { + return nil, err + } + + facility, err := parseFacility(ctx.Config["syslog-facility"]) + if err != nil { + return nil, err + } + + syslogFormatter, syslogFramer, err := parseLogFormat(ctx.Config["syslog-format"]) + if err != nil { + return nil, err + } + + logTag := path.Base(os.Args[0]) + "/" + tag + + var log *syslog.Writer + if proto == secureProto { + tlsConfig, tlsErr := parseTLSConfig(ctx.Config) + if tlsErr != nil { + return nil, tlsErr + } + log, err = syslog.DialWithTLSConfig(proto, address, facility, logTag, tlsConfig) + } else { + log, err = syslog.Dial(proto, address, facility, logTag) + } + + if err != nil { + return nil, err + } + + log.SetFormatter(syslogFormatter) + log.SetFramer(syslogFramer) + + return &syslogger{ + writer: log, + }, nil +} + +func (s *syslogger) Log(msg *logger.Message) error { + if msg.Source == "stderr" { + return s.writer.Err(string(msg.Line)) + } + return s.writer.Info(string(msg.Line)) +} + +func (s *syslogger) Close() error { + return s.writer.Close() +} + +func (s *syslogger) Name() string { + return name +} + +func parseAddress(address string) (string, string, error) { + if address == "" { + return "", "", nil + } + if !urlutil.IsTransportURL(address) { + return "", "", fmt.Errorf("syslog-address should be in form proto://address, got %v", address) + } + url, err := url.Parse(address) + if err != nil { + return "", "", err + } + + // unix socket validation + if url.Scheme == "unix" { + if _, err := os.Stat(url.Path); err != nil { + return "", "", err + } + return url.Scheme, url.Path, nil + } + + // here we process tcp|udp + host := url.Host + if _, _, err := net.SplitHostPort(host); err != nil { + if !strings.Contains(err.Error(), "missing port in address") { + return "", "", err + } + host = host + ":514" + } + + return url.Scheme, host, nil +} + +// ValidateLogOpt looks for syslog specific log options +// syslog-address, syslog-facility, & syslog-tag. 
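+//
+// For example (values hypothetical), this config passes, while any key not
+// listed in the switch below, such as "foo", would be rejected:
+//
+//	err := ValidateLogOpt(map[string]string{
+//		"syslog-address":  "udp://192.168.0.42:514",
+//		"syslog-facility": "local3",
+//	})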
+func ValidateLogOpt(cfg map[string]string) error { + for key := range cfg { + switch key { + case "syslog-address": + case "syslog-facility": + case "syslog-tag": + case "syslog-tls-ca-cert": + case "syslog-tls-cert": + case "syslog-tls-key": + case "syslog-tls-skip-verify": + case "tag": + case "syslog-format": + default: + return fmt.Errorf("unknown log opt '%s' for syslog log driver", key) + } + } + if _, _, err := parseAddress(cfg["syslog-address"]); err != nil { + return err + } + if _, err := parseFacility(cfg["syslog-facility"]); err != nil { + return err + } + if _, _, err := parseLogFormat(cfg["syslog-format"]); err != nil { + return err + } + return nil +} + +func parseFacility(facility string) (syslog.Priority, error) { + if facility == "" { + return syslog.LOG_DAEMON, nil + } + + if syslogFacility, valid := facilities[facility]; valid { + return syslogFacility, nil + } + + fInt, err := strconv.Atoi(facility) + if err == nil && 0 <= fInt && fInt <= 23 { + return syslog.Priority(fInt << 3), nil + } + + return syslog.Priority(0), errors.New("invalid syslog facility") +} + +func parseTLSConfig(cfg map[string]string) (*tls.Config, error) { + _, skipVerify := cfg["syslog-tls-skip-verify"] + + opts := tlsconfig.Options{ + CAFile: cfg["syslog-tls-ca-cert"], + CertFile: cfg["syslog-tls-cert"], + KeyFile: cfg["syslog-tls-key"], + InsecureSkipVerify: skipVerify, + } + + return tlsconfig.Client(opts) +} + +func parseLogFormat(logFormat string) (syslog.Formatter, syslog.Framer, error) { + switch logFormat { + case "": + return syslog.UnixFormatter, syslog.DefaultFramer, nil + case "rfc3164": + return syslog.RFC3164Formatter, syslog.DefaultFramer, nil + case "rfc5424": + return rfc5424formatterWithAppNameAsTag, syslog.RFC5425MessageLengthFramer, nil + default: + return nil, nil, errors.New("Invalid syslog format") + } + +} diff --git a/vendor/github.com/docker/docker/daemon/logger/syslog/syslog_unsupported.go b/vendor/github.com/docker/docker/daemon/logger/syslog/syslog_unsupported.go new file mode 100644 index 00000000..50cc51b6 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/syslog/syslog_unsupported.go @@ -0,0 +1,3 @@ +// +build !linux + +package syslog diff --git a/vendor/github.com/docker/docker/daemon/logs.go b/vendor/github.com/docker/docker/daemon/logs.go new file mode 100644 index 00000000..40c47a6e --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logs.go @@ -0,0 +1,154 @@ +package daemon + +import ( + "fmt" + "io" + "strconv" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/daemon/logger/jsonfilelog" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/stdcopy" + containertypes "github.com/docker/engine-api/types/container" + timetypes "github.com/docker/engine-api/types/time" +) + +// ContainerLogs hooks up a container's stdout and stderr streams +// configured with the given struct. 
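+//
+// A rough call sketch; the exact shape of backend.ContainerLogsConfig beyond
+// the fields read below is an assumption, and "web" is a hypothetical name:
+//
+//	var cfg backend.ContainerLogsConfig
+//	cfg.ShowStdout, cfg.Tail, cfg.Follow = true, "100", true
+//	cfg.OutStream = os.Stdout
+//	err := daemon.ContainerLogs("web", &cfg, make(chan struct{}))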
+func (daemon *Daemon) ContainerLogs(containerName string, config *backend.ContainerLogsConfig, started chan struct{}) error { + container, err := daemon.GetContainer(containerName) + if err != nil { + return err + } + + if !(config.ShowStdout || config.ShowStderr) { + return fmt.Errorf("You must choose at least one stream") + } + + cLog, err := daemon.getLogger(container) + if err != nil { + return err + } + logReader, ok := cLog.(logger.LogReader) + if !ok { + return logger.ErrReadLogsNotSupported + } + + follow := config.Follow && container.IsRunning() + tailLines, err := strconv.Atoi(config.Tail) + if err != nil { + tailLines = -1 + } + + logrus.Debug("logs: begin stream") + + var since time.Time + if config.Since != "" { + s, n, err := timetypes.ParseTimestamps(config.Since, 0) + if err != nil { + return err + } + since = time.Unix(s, n) + } + readConfig := logger.ReadConfig{ + Since: since, + Tail: tailLines, + Follow: follow, + } + logs := logReader.ReadLogs(readConfig) + + wf := ioutils.NewWriteFlusher(config.OutStream) + defer wf.Close() + close(started) + wf.Flush() + + var outStream io.Writer = wf + errStream := outStream + if !container.Config.Tty { + errStream = stdcopy.NewStdWriter(outStream, stdcopy.Stderr) + outStream = stdcopy.NewStdWriter(outStream, stdcopy.Stdout) + } + + for { + select { + case err := <-logs.Err: + logrus.Errorf("Error streaming logs: %v", err) + return nil + case <-config.Stop: + logs.Close() + return nil + case msg, ok := <-logs.Msg: + if !ok { + logrus.Debugf("logs: end stream") + logs.Close() + return nil + } + logLine := msg.Line + if config.Timestamps { + logLine = append([]byte(msg.Timestamp.Format(logger.TimeFormat)+" "), logLine...) + } + if msg.Source == "stdout" && config.ShowStdout { + outStream.Write(logLine) + } + if msg.Source == "stderr" && config.ShowStderr { + errStream.Write(logLine) + } + } + } +} + +func (daemon *Daemon) getLogger(container *container.Container) (logger.Logger, error) { + if container.LogDriver != nil && container.IsRunning() { + return container.LogDriver, nil + } + cfg := daemon.getLogConfig(container.HostConfig.LogConfig) + if err := logger.ValidateLogOpts(cfg.Type, cfg.Config); err != nil { + return nil, err + } + return container.StartLogger(cfg) +} + +// StartLogging initializes and starts the container logging stream. +func (daemon *Daemon) StartLogging(container *container.Container) error { + cfg := daemon.getLogConfig(container.HostConfig.LogConfig) + if cfg.Type == "none" { + return nil // do not start logging routines + } + + if err := logger.ValidateLogOpts(cfg.Type, cfg.Config); err != nil { + return err + } + l, err := container.StartLogger(cfg) + if err != nil { + return fmt.Errorf("Failed to initialize logging driver: %v", err) + } + + copier := logger.NewCopier(container.ID, map[string]io.Reader{"stdout": container.StdoutPipe(), "stderr": container.StderrPipe()}, l) + container.LogCopier = copier + copier.Run() + container.LogDriver = l + + // set LogPath field only for json-file logdriver + if jl, ok := l.(*jsonfilelog.JSONFileLogger); ok { + container.LogPath = jl.LogPath() + } + + return nil +} + +// getLogConfig returns the log configuration for the container. 
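+// An empty LogConfig inherits daemon.defaultLogConfig wholesale; a config
+// that only sets options (for example a hypothetical {"max-size": "10m"})
+// keeps those options and is assigned Type jsonfilelog.Name.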
+func (daemon *Daemon) getLogConfig(cfg containertypes.LogConfig) containertypes.LogConfig { + if cfg.Type != "" || len(cfg.Config) > 0 { // container has log driver configured + if cfg.Type == "" { + cfg.Type = jsonfilelog.Name + } + return cfg + } + + // Use daemon's default log config for containers + return daemon.defaultLogConfig +} diff --git a/vendor/github.com/docker/docker/daemon/monitor.go b/vendor/github.com/docker/docker/daemon/monitor.go new file mode 100644 index 00000000..f9f7def9 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/monitor.go @@ -0,0 +1,144 @@ +package daemon + +import ( + "errors" + "fmt" + "io" + "runtime" + "strconv" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/libcontainerd" + "github.com/docker/docker/runconfig" +) + +// StateChanged updates daemon state changes from containerd +func (daemon *Daemon) StateChanged(id string, e libcontainerd.StateInfo) error { + c := daemon.containers.Get(id) + if c == nil { + return fmt.Errorf("no such container: %s", id) + } + + switch e.State { + case libcontainerd.StateOOM: + // StateOOM is Linux specific and should never be hit on Windows + if runtime.GOOS == "windows" { + return errors.New("Received StateOOM from libcontainerd on Windows. This should never happen.") + } + daemon.LogContainerEvent(c, "oom") + case libcontainerd.StateExit: + c.Lock() + defer c.Unlock() + c.Wait() + c.Reset(false) + c.SetStopped(platformConstructExitStatus(e)) + attributes := map[string]string{ + "exitCode": strconv.Itoa(int(e.ExitCode)), + } + daemon.LogContainerEventWithAttributes(c, "die", attributes) + daemon.Cleanup(c) + // FIXME: here is race condition between two RUN instructions in Dockerfile + // because they share same runconfig and change image. Must be fixed + // in builder/builder.go + return c.ToDisk() + case libcontainerd.StateRestart: + c.Lock() + defer c.Unlock() + c.Reset(false) + c.RestartCount++ + c.SetRestarting(platformConstructExitStatus(e)) + attributes := map[string]string{ + "exitCode": strconv.Itoa(int(e.ExitCode)), + } + daemon.LogContainerEventWithAttributes(c, "die", attributes) + return c.ToDisk() + case libcontainerd.StateExitProcess: + c.Lock() + defer c.Unlock() + if execConfig := c.ExecCommands.Get(e.ProcessID); execConfig != nil { + ec := int(e.ExitCode) + execConfig.ExitCode = &ec + execConfig.Running = false + execConfig.Wait() + if err := execConfig.CloseStreams(); err != nil { + logrus.Errorf("%s: %s", c.ID, err) + } + + // remove the exec command from the container's store only and not the + // daemon's store so that the exec command can be inspected. + c.ExecCommands.Delete(execConfig.ID) + } else { + logrus.Warnf("Ignoring StateExitProcess for %v but no exec command found", e) + } + case libcontainerd.StateStart, libcontainerd.StateRestore: + c.SetRunning(int(e.Pid), e.State == libcontainerd.StateStart) + c.HasBeenManuallyStopped = false + if err := c.ToDisk(); err != nil { + c.Reset(false) + return err + } + daemon.LogContainerEvent(c, "start") + case libcontainerd.StatePause: + c.Paused = true + daemon.LogContainerEvent(c, "pause") + case libcontainerd.StateResume: + c.Paused = false + daemon.LogContainerEvent(c, "unpause") + } + + return nil +} + +// AttachStreams is called by libcontainerd to connect the stdio. 
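+// Copies run in both directions: the container's stdin reader is pumped into
+// iop.Stdin, while iop.Stdout and iop.Stderr are drained into the
+// StreamConfig writers; each drain calls s.Add(1)/s.Done() so that waiters
+// on the StreamConfig can block until the copies finish.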
+func (daemon *Daemon) AttachStreams(id string, iop libcontainerd.IOPipe) error {
+    var s *runconfig.StreamConfig
+    c := daemon.containers.Get(id)
+    if c == nil {
+        ec, err := daemon.getExecConfig(id)
+        if err != nil {
+            return fmt.Errorf("no such exec/container: %s", id)
+        }
+        s = ec.StreamConfig
+    } else {
+        s = c.StreamConfig
+        if err := daemon.StartLogging(c); err != nil {
+            c.Reset(false)
+            return err
+        }
+    }
+
+    if stdin := s.Stdin(); stdin != nil {
+        if iop.Stdin != nil {
+            go func() {
+                io.Copy(iop.Stdin, stdin)
+                iop.Stdin.Close()
+            }()
+        }
+    } else {
+        if c != nil && !c.Config.Tty {
+            // no stdin is attached and tty is disabled, so close containerd's iopipe stdin
+            if iop.Stdin != nil {
+                iop.Stdin.Close()
+            }
+        }
+    }
+
+    copyStream := func(w io.Writer, r io.Reader) {
+        s.Add(1)
+        go func() {
+            if _, err := io.Copy(w, r); err != nil {
+                logrus.Errorf("%v stream copy error: %v", id, err)
+            }
+            s.Done()
+        }()
+    }
+
+    if iop.Stdout != nil {
+        copyStream(s.Stdout(), iop.Stdout)
+    }
+    if iop.Stderr != nil {
+        copyStream(s.Stderr(), iop.Stderr)
+    }
+
+    return nil
+}
diff --git a/vendor/github.com/docker/docker/daemon/monitor_linux.go b/vendor/github.com/docker/docker/daemon/monitor_linux.go
new file mode 100644
index 00000000..df8b6c5d
--- /dev/null
+++ b/vendor/github.com/docker/docker/daemon/monitor_linux.go
@@ -0,0 +1,14 @@
+package daemon
+
+import (
+    "github.com/docker/docker/container"
+    "github.com/docker/docker/libcontainerd"
+)
+
+// platformConstructExitStatus returns a platform specific exit status structure
+func platformConstructExitStatus(e libcontainerd.StateInfo) *container.ExitStatus {
+    return &container.ExitStatus{
+        ExitCode:  int(e.ExitCode),
+        OOMKilled: e.OOMKilled,
+    }
+}
diff --git a/vendor/github.com/docker/docker/daemon/mounts.go b/vendor/github.com/docker/docker/daemon/mounts.go
new file mode 100644
index 00000000..d4f24b28
--- /dev/null
+++ b/vendor/github.com/docker/docker/daemon/mounts.go
@@ -0,0 +1,48 @@
+package daemon
+
+import (
+    "fmt"
+    "strings"
+
+    "github.com/docker/docker/container"
+    volumestore "github.com/docker/docker/volume/store"
+)
+
+func (daemon *Daemon) prepareMountPoints(container *container.Container) error {
+    for _, config := range container.MountPoints {
+        if err := daemon.lazyInitializeVolume(container.ID, config); err != nil {
+            return err
+        }
+    }
+    return nil
+}
+
+func (daemon *Daemon) removeMountPoints(container *container.Container, rm bool) error {
+    var rmErrors []string
+    for _, m := range container.MountPoints {
+        if m.Volume == nil {
+            continue
+        }
+        daemon.volumes.Dereference(m.Volume, container.ID)
+        if rm {
+            // Do not remove named mountpoints; these are mountpoints
+            // specified like `docker run -v <name>:/foo`
+            if m.Named {
+                continue
+            }
+            err := daemon.volumes.Remove(m.Volume)
+            // Ignore volume-in-use errors: the volume being referenced by
+            // another container is not an error, but an implementation
+            // detail. This prevents docker from logging "ERROR: Volume in
+            // use" when another container is using the volume.
+ if err != nil && !volumestore.IsInUse(err) { + rmErrors = append(rmErrors, err.Error()) + } + } + } + if len(rmErrors) > 0 { + return fmt.Errorf("Error removing volumes:\n%v", strings.Join(rmErrors, "\n")) + } + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/network/settings.go b/vendor/github.com/docker/docker/daemon/network/settings.go new file mode 100644 index 00000000..823bec26 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/network/settings.go @@ -0,0 +1,22 @@ +package network + +import ( + networktypes "github.com/docker/engine-api/types/network" + "github.com/docker/go-connections/nat" +) + +// Settings stores configuration details about the daemon network config +// TODO Windows. Many of these fields can be factored out., +type Settings struct { + Bridge string + SandboxID string + HairpinMode bool + LinkLocalIPv6Address string + LinkLocalIPv6PrefixLen int + Networks map[string]*networktypes.EndpointSettings + Ports nat.PortMap + SandboxKey string + SecondaryIPAddresses []networktypes.Address + SecondaryIPv6Addresses []networktypes.Address + IsAnonymousEndpoint bool +} diff --git a/vendor/github.com/docker/docker/daemon/network_operations.go b/vendor/github.com/docker/docker/daemon/network_operations.go new file mode 100644 index 00000000..70097298 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/network_operations.go @@ -0,0 +1,77 @@ +package daemon + +import ( + "fmt" + "os" + + "github.com/docker/docker/container" + derr "github.com/docker/docker/errors" + networktypes "github.com/docker/engine-api/types/network" +) + +func (daemon *Daemon) updateContainerNetworkSettings(container *container.Container, endpointsConfig map[string]*networktypes.EndpointSettings) error { + return nil +} + +func (daemon *Daemon) updateNetwork(container *container.Container) error { + return nil +} + +func (daemon *Daemon) allocateNetwork(container *container.Container) error { + return nil +} + +func (daemon *Daemon) releaseNetwork(container *container.Container) { + if container.HostConfig.NetworkMode.IsContainer() || container.Config.NetworkDisabled { + return + } +} + +func (daemon *Daemon) getNetworkedContainer(containerID, connectedContainerID string) (*container.Container, error) { + nc, err := daemon.GetContainer(connectedContainerID) + if err != nil { + return nil, err + } + if containerID == nc.ID { + return nil, fmt.Errorf("cannot join own network") + } + if !nc.IsRunning() { + err := fmt.Errorf("cannot join network of a non running container: %s", connectedContainerID) + return nil, derr.NewRequestConflictError(err) + } + if nc.IsRestarting() { + return nil, errContainerIsRestarting(connectedContainerID) + } + return nc, nil +} + +func (daemon *Daemon) initializeNetworking(container *container.Container) error { + var err error + + if container.HostConfig.NetworkMode.IsContainer() { + // we need to get the hosts files from the container to join + nc, err := daemon.getNetworkedContainer(container.ID, container.HostConfig.NetworkMode.ConnectedContainer()) + if err != nil { + return err + } + container.HostnamePath = nc.HostnamePath + container.HostsPath = nc.HostsPath + container.ResolvConfPath = nc.ResolvConfPath + container.Config.Hostname = nc.Config.Hostname + container.Config.Domainname = nc.Config.Domainname + return nil + } + + if container.HostConfig.NetworkMode.IsHost() { + container.Config.Hostname, err = os.Hostname() + if err != nil { + return err + } + } + + if err := daemon.allocateNetwork(container); err != nil { + return err + } + + 
return container.BuildHostnameFile() +} diff --git a/vendor/github.com/docker/docker/daemon/oci_linux.go b/vendor/github.com/docker/docker/daemon/oci_linux.go new file mode 100644 index 00000000..47d4b89f --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/oci_linux.go @@ -0,0 +1,686 @@ +package daemon + +import ( + "fmt" + "io" + "os" + "path/filepath" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/caps" + "github.com/docker/docker/libcontainerd" + "github.com/docker/docker/oci" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/stringutils" + "github.com/docker/docker/pkg/symlink" + "github.com/docker/docker/volume" + containertypes "github.com/docker/engine-api/types/container" + "github.com/opencontainers/runc/libcontainer/apparmor" + "github.com/opencontainers/runc/libcontainer/devices" + "github.com/opencontainers/runc/libcontainer/user" + "github.com/opencontainers/specs/specs-go" +) + +func setResources(s *specs.Spec, r containertypes.Resources) error { + weightDevices, err := getBlkioWeightDevices(r) + if err != nil { + return err + } + readBpsDevice, err := getBlkioReadBpsDevices(r) + if err != nil { + return err + } + writeBpsDevice, err := getBlkioWriteBpsDevices(r) + if err != nil { + return err + } + readIOpsDevice, err := getBlkioReadIOpsDevices(r) + if err != nil { + return err + } + writeIOpsDevice, err := getBlkioWriteIOpsDevices(r) + if err != nil { + return err + } + + memoryRes := getMemoryResources(r) + cpuRes := getCPUResources(r) + blkioWeight := r.BlkioWeight + + specResources := &specs.Resources{ + Memory: memoryRes, + CPU: cpuRes, + BlockIO: &specs.BlockIO{ + Weight: &blkioWeight, + WeightDevice: weightDevices, + ThrottleReadBpsDevice: readBpsDevice, + ThrottleWriteBpsDevice: writeBpsDevice, + ThrottleReadIOPSDevice: readIOpsDevice, + ThrottleWriteIOPSDevice: writeIOpsDevice, + }, + DisableOOMKiller: r.OomKillDisable, + Pids: &specs.Pids{ + Limit: &r.PidsLimit, + }, + } + + if s.Linux.Resources != nil && len(s.Linux.Resources.Devices) > 0 { + specResources.Devices = s.Linux.Resources.Devices + } + + s.Linux.Resources = specResources + return nil +} + +func setDevices(s *specs.Spec, c *container.Container) error { + // Build lists of devices allowed and created within the container. + var devs []specs.Device + devPermissions := s.Linux.Resources.Devices + if c.HostConfig.Privileged { + hostDevices, err := devices.HostDevices() + if err != nil { + return err + } + for _, d := range hostDevices { + devs = append(devs, specDevice(d)) + } + rwm := "rwm" + devPermissions = []specs.DeviceCgroup{ + { + Allow: true, + Access: &rwm, + }, + } + } else { + for _, deviceMapping := range c.HostConfig.Devices { + d, dPermissions, err := getDevicesFromPath(deviceMapping) + if err != nil { + return err + } + devs = append(devs, d...) + devPermissions = append(devPermissions, dPermissions...) + } + } + + s.Linux.Devices = append(s.Linux.Devices, devs...) 
+ s.Linux.Resources.Devices = devPermissions + return nil +} + +func setRlimits(daemon *Daemon, s *specs.Spec, c *container.Container) error { + var rlimits []specs.Rlimit + + ulimits := c.HostConfig.Ulimits + // Merge ulimits with daemon defaults + ulIdx := make(map[string]struct{}) + for _, ul := range ulimits { + ulIdx[ul.Name] = struct{}{} + } + for name, ul := range daemon.configStore.Ulimits { + if _, exists := ulIdx[name]; !exists { + ulimits = append(ulimits, ul) + } + } + + for _, ul := range ulimits { + rlimits = append(rlimits, specs.Rlimit{ + Type: "RLIMIT_" + strings.ToUpper(ul.Name), + Soft: uint64(ul.Soft), + Hard: uint64(ul.Hard), + }) + } + + s.Process.Rlimits = rlimits + return nil +} + +func setUser(s *specs.Spec, c *container.Container) error { + uid, gid, additionalGids, err := getUser(c, c.Config.User) + if err != nil { + return err + } + s.Process.User.UID = uid + s.Process.User.GID = gid + s.Process.User.AdditionalGids = additionalGids + return nil +} + +func readUserFile(c *container.Container, p string) (io.ReadCloser, error) { + fp, err := symlink.FollowSymlinkInScope(filepath.Join(c.BaseFS, p), c.BaseFS) + if err != nil { + return nil, err + } + return os.Open(fp) +} + +func getUser(c *container.Container, username string) (uint32, uint32, []uint32, error) { + passwdPath, err := user.GetPasswdPath() + if err != nil { + return 0, 0, nil, err + } + groupPath, err := user.GetGroupPath() + if err != nil { + return 0, 0, nil, err + } + passwdFile, err := readUserFile(c, passwdPath) + if err == nil { + defer passwdFile.Close() + } + groupFile, err := readUserFile(c, groupPath) + if err == nil { + defer groupFile.Close() + } + + execUser, err := user.GetExecUser(username, nil, passwdFile, groupFile) + if err != nil { + return 0, 0, nil, err + } + + // todo: fix this double read by a change to libcontainer/user pkg + groupFile, err = readUserFile(c, groupPath) + if err == nil { + defer groupFile.Close() + } + var addGroups []int + if len(c.HostConfig.GroupAdd) > 0 { + addGroups, err = user.GetAdditionalGroups(c.HostConfig.GroupAdd, groupFile) + if err != nil { + return 0, 0, nil, err + } + } + uid := uint32(execUser.Uid) + gid := uint32(execUser.Gid) + sgids := append(execUser.Sgids, addGroups...) + var additionalGids []uint32 + for _, g := range sgids { + additionalGids = append(additionalGids, uint32(g)) + } + return uid, gid, additionalGids, nil +} + +func setNamespace(s *specs.Spec, ns specs.Namespace) { + for i, n := range s.Linux.Namespaces { + if n.Type == ns.Type { + s.Linux.Namespaces[i] = ns + return + } + } + s.Linux.Namespaces = append(s.Linux.Namespaces, ns) +} + +func setCapabilities(s *specs.Spec, c *container.Container) error { + var caplist []string + var err error + if c.HostConfig.Privileged { + caplist = caps.GetAllCapabilities() + } else { + caplist, err = caps.TweakCapabilities(s.Process.Capabilities, c.HostConfig.CapAdd, c.HostConfig.CapDrop) + if err != nil { + return err + } + } + s.Process.Capabilities = caplist + return nil +} + +func delNamespace(s *specs.Spec, nsType specs.NamespaceType) { + idx := -1 + for i, n := range s.Linux.Namespaces { + if n.Type == nsType { + idx = i + } + } + if idx >= 0 { + s.Linux.Namespaces = append(s.Linux.Namespaces[:idx], s.Linux.Namespaces[idx+1:]...) 
+ } +} + +func setNamespaces(daemon *Daemon, s *specs.Spec, c *container.Container) error { + userNS := false + // user + if c.HostConfig.UsernsMode.IsPrivate() { + uidMap, gidMap := daemon.GetUIDGIDMaps() + if uidMap != nil { + userNS = true + ns := specs.Namespace{Type: "user"} + setNamespace(s, ns) + s.Linux.UIDMappings = specMapping(uidMap) + s.Linux.GIDMappings = specMapping(gidMap) + } + } + // network + if !c.Config.NetworkDisabled { + ns := specs.Namespace{Type: "network"} + parts := strings.SplitN(string(c.HostConfig.NetworkMode), ":", 2) + if parts[0] == "container" { + nc, err := daemon.getNetworkedContainer(c.ID, c.HostConfig.NetworkMode.ConnectedContainer()) + if err != nil { + return err + } + ns.Path = fmt.Sprintf("/proc/%d/ns/net", nc.State.GetPID()) + if userNS { + // to share a net namespace, they must also share a user namespace + nsUser := specs.Namespace{Type: "user"} + nsUser.Path = fmt.Sprintf("/proc/%d/ns/user", nc.State.GetPID()) + setNamespace(s, nsUser) + } + } else if c.HostConfig.NetworkMode.IsHost() { + ns.Path = fmt.Sprintf("/proc/%d/ns/net", os.Getpid()) + } + setNamespace(s, ns) + } + // ipc + if c.HostConfig.IpcMode.IsContainer() { + ns := specs.Namespace{Type: "ipc"} + ic, err := daemon.getIpcContainer(c) + if err != nil { + return err + } + ns.Path = fmt.Sprintf("/proc/%d/ns/ipc", ic.State.GetPID()) + setNamespace(s, ns) + if userNS { + // to share an IPC namespace, they must also share a user namespace + nsUser := specs.Namespace{Type: "user"} + nsUser.Path = fmt.Sprintf("/proc/%d/ns/user", ic.State.GetPID()) + setNamespace(s, nsUser) + } + } else if c.HostConfig.IpcMode.IsHost() { + delNamespace(s, specs.NamespaceType("ipc")) + } else { + ns := specs.Namespace{Type: "ipc"} + setNamespace(s, ns) + } + // pid + if c.HostConfig.PidMode.IsHost() { + delNamespace(s, specs.NamespaceType("pid")) + } + // uts + if c.HostConfig.UTSMode.IsHost() { + delNamespace(s, specs.NamespaceType("uts")) + s.Hostname = "" + } + + return nil +} + +func specMapping(s []idtools.IDMap) []specs.IDMapping { + var ids []specs.IDMapping + for _, item := range s { + ids = append(ids, specs.IDMapping{ + HostID: uint32(item.HostID), + ContainerID: uint32(item.ContainerID), + Size: uint32(item.Size), + }) + } + return ids +} + +func getMountInfo(mountinfo []*mount.Info, dir string) *mount.Info { + for _, m := range mountinfo { + if m.Mountpoint == dir { + return m + } + } + return nil +} + +// Get the source mount point of directory passed in as argument. Also return +// optional fields. +func getSourceMount(source string) (string, string, error) { + // Ensure any symlinks are resolved. + sourcePath, err := filepath.EvalSymlinks(source) + if err != nil { + return "", "", err + } + + mountinfos, err := mount.GetMounts() + if err != nil { + return "", "", err + } + + mountinfo := getMountInfo(mountinfos, sourcePath) + if mountinfo != nil { + return sourcePath, mountinfo.Optional, nil + } + + path := sourcePath + for { + path = filepath.Dir(path) + + mountinfo = getMountInfo(mountinfos, path) + if mountinfo != nil { + return path, mountinfo.Optional, nil + } + + if path == "/" { + break + } + } + + // If we are here, we did not find parent mount. Something is wrong. + return "", "", fmt.Errorf("Could not find source mount of %s", source) +} + +// Ensure mount point on which path is mounted, is shared. 
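+// Sharedness is read from the optional-fields column that getSourceMount
+// extracts from /proc/self/mountinfo: a tag such as "shared:1" passes,
+// while "master:2" (a slave mount) or an empty column fails.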
+func ensureShared(path string) error { + sharedMount := false + + sourceMount, optionalOpts, err := getSourceMount(path) + if err != nil { + return err + } + // Make sure source mount point is shared. + optsSplit := strings.Split(optionalOpts, " ") + for _, opt := range optsSplit { + if strings.HasPrefix(opt, "shared:") { + sharedMount = true + break + } + } + + if !sharedMount { + return fmt.Errorf("Path %s is mounted on %s but it is not a shared mount.", path, sourceMount) + } + return nil +} + +// Ensure mount point on which path is mounted, is either shared or slave. +func ensureSharedOrSlave(path string) error { + sharedMount := false + slaveMount := false + + sourceMount, optionalOpts, err := getSourceMount(path) + if err != nil { + return err + } + // Make sure source mount point is shared. + optsSplit := strings.Split(optionalOpts, " ") + for _, opt := range optsSplit { + if strings.HasPrefix(opt, "shared:") { + sharedMount = true + break + } else if strings.HasPrefix(opt, "master:") { + slaveMount = true + break + } + } + + if !sharedMount && !slaveMount { + return fmt.Errorf("Path %s is mounted on %s but it is not a shared or slave mount.", path, sourceMount) + } + return nil +} + +var ( + mountPropagationMap = map[string]int{ + "private": mount.PRIVATE, + "rprivate": mount.RPRIVATE, + "shared": mount.SHARED, + "rshared": mount.RSHARED, + "slave": mount.SLAVE, + "rslave": mount.RSLAVE, + } + + mountPropagationReverseMap = map[int]string{ + mount.PRIVATE: "private", + mount.RPRIVATE: "rprivate", + mount.SHARED: "shared", + mount.RSHARED: "rshared", + mount.SLAVE: "slave", + mount.RSLAVE: "rslave", + } +) + +func setMounts(daemon *Daemon, s *specs.Spec, c *container.Container, mounts []container.Mount) error { + userMounts := make(map[string]struct{}) + for _, m := range mounts { + userMounts[m.Destination] = struct{}{} + } + + // Filter out mounts that are overriden by user supplied mounts + var defaultMounts []specs.Mount + _, mountDev := userMounts["/dev"] + for _, m := range s.Mounts { + if _, ok := userMounts[m.Destination]; !ok { + if mountDev && strings.HasPrefix(m.Destination, "/dev/") { + continue + } + defaultMounts = append(defaultMounts, m) + } + } + + s.Mounts = defaultMounts + for _, m := range mounts { + for _, cm := range s.Mounts { + if cm.Destination == m.Destination { + return fmt.Errorf("Duplicate mount point '%s'", m.Destination) + } + } + + if m.Source == "tmpfs" { + opt := []string{"noexec", "nosuid", "nodev", volume.DefaultPropagationMode} + if m.Data != "" { + opt = append(opt, strings.Split(m.Data, ",")...) + } else { + opt = append(opt, "size=65536k") + } + + s.Mounts = append(s.Mounts, specs.Mount{Destination: m.Destination, Source: m.Source, Type: "tmpfs", Options: opt}) + continue + } + + mt := specs.Mount{Destination: m.Destination, Source: m.Source, Type: "bind"} + + // Determine property of RootPropagation based on volume + // properties. If a volume is shared, then keep root propagation + // shared. This should work for slave and private volumes too. + // + // For slave volumes, it can be either [r]shared/[r]slave. + // + // For private volumes any root propagation value should work. 
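+        // For example (hypothetical flag): a volume mounted with
+        // propagation "rshared" must come from a shared source mount and
+        // promotes RootfsPropagation to "shared" unless the root is already
+        // [r]shared, while "rslave" accepts a shared or slave source and
+        // only downgrades the root to "rslave" when needed.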
+ pFlag := mountPropagationMap[m.Propagation] + if pFlag == mount.SHARED || pFlag == mount.RSHARED { + if err := ensureShared(m.Source); err != nil { + return err + } + rootpg := mountPropagationMap[s.Linux.RootfsPropagation] + if rootpg != mount.SHARED && rootpg != mount.RSHARED { + s.Linux.RootfsPropagation = mountPropagationReverseMap[mount.SHARED] + } + } else if pFlag == mount.SLAVE || pFlag == mount.RSLAVE { + if err := ensureSharedOrSlave(m.Source); err != nil { + return err + } + rootpg := mountPropagationMap[s.Linux.RootfsPropagation] + if rootpg != mount.SHARED && rootpg != mount.RSHARED && rootpg != mount.SLAVE && rootpg != mount.RSLAVE { + s.Linux.RootfsPropagation = mountPropagationReverseMap[mount.RSLAVE] + } + } + + opts := []string{"rbind"} + if !m.Writable { + opts = append(opts, "ro") + } + if pFlag != 0 { + opts = append(opts, mountPropagationReverseMap[pFlag]) + } + + mt.Options = opts + s.Mounts = append(s.Mounts, mt) + } + + if s.Root.Readonly { + for i, m := range s.Mounts { + switch m.Destination { + case "/proc", "/dev/pts", "/dev/mqueue": // /dev is remounted by runc + continue + } + if _, ok := userMounts[m.Destination]; !ok { + if !stringutils.InSlice(m.Options, "ro") { + s.Mounts[i].Options = append(s.Mounts[i].Options, "ro") + } + } + } + } + + if c.HostConfig.Privileged { + if !s.Root.Readonly { + // clear readonly for /sys + for i := range s.Mounts { + if s.Mounts[i].Destination == "/sys" { + clearReadOnly(&s.Mounts[i]) + } + } + } + s.Linux.ReadonlyPaths = nil + s.Linux.MaskedPaths = nil + } + + // TODO: until a kernel/mount solution exists for handling remount in a user namespace, + // we must clear the readonly flag for the cgroups mount (@mrunalp concurs) + if uidMap, _ := daemon.GetUIDGIDMaps(); uidMap != nil || c.HostConfig.Privileged { + for i, m := range s.Mounts { + if m.Type == "cgroup" { + clearReadOnly(&s.Mounts[i]) + } + } + } + + return nil +} + +func (daemon *Daemon) populateCommonSpec(s *specs.Spec, c *container.Container) error { + linkedEnv, err := daemon.setupLinkedContainers(c) + if err != nil { + return err + } + s.Root = specs.Root{ + Path: c.BaseFS, + Readonly: c.HostConfig.ReadonlyRootfs, + } + rootUID, rootGID := daemon.GetRemappedUIDGID() + if err := c.SetupWorkingDirectory(rootUID, rootGID); err != nil { + return err + } + cwd := c.Config.WorkingDir + if len(cwd) == 0 { + cwd = "/" + } + s.Process.Args = append([]string{c.Path}, c.Args...) 
+ s.Process.Cwd = cwd + s.Process.Env = c.CreateDaemonEnvironment(linkedEnv) + s.Process.Terminal = c.Config.Tty + s.Hostname = c.FullHostname() + + return nil +} + +func (daemon *Daemon) createSpec(c *container.Container) (*libcontainerd.Spec, error) { + s := oci.DefaultSpec() + if err := daemon.populateCommonSpec(&s, c); err != nil { + return nil, err + } + + var cgroupsPath string + scopePrefix := "docker" + parent := "/docker" + useSystemd := UsingSystemd(daemon.configStore) + if useSystemd { + parent = "system.slice" + } + + if c.HostConfig.CgroupParent != "" { + parent = c.HostConfig.CgroupParent + } else if daemon.configStore.CgroupParent != "" { + parent = daemon.configStore.CgroupParent + } + + if useSystemd { + cgroupsPath = parent + ":" + scopePrefix + ":" + c.ID + logrus.Debugf("createSpec: cgroupsPath: %s", cgroupsPath) + } else { + cgroupsPath = filepath.Join(parent, c.ID) + } + s.Linux.CgroupsPath = &cgroupsPath + + if err := setResources(&s, c.HostConfig.Resources); err != nil { + return nil, fmt.Errorf("linux runtime spec resources: %v", err) + } + s.Linux.Resources.OOMScoreAdj = &c.HostConfig.OomScoreAdj + if err := setDevices(&s, c); err != nil { + return nil, fmt.Errorf("linux runtime spec devices: %v", err) + } + if err := setRlimits(daemon, &s, c); err != nil { + return nil, fmt.Errorf("linux runtime spec rlimits: %v", err) + } + if err := setUser(&s, c); err != nil { + return nil, fmt.Errorf("linux spec user: %v", err) + } + if err := setNamespaces(daemon, &s, c); err != nil { + return nil, fmt.Errorf("linux spec namespaces: %v", err) + } + if err := setCapabilities(&s, c); err != nil { + return nil, fmt.Errorf("linux spec capabilities: %v", err) + } + if err := setSeccomp(daemon, &s, c); err != nil { + return nil, fmt.Errorf("linux seccomp: %v", err) + } + + if err := daemon.setupIpcDirs(c); err != nil { + return nil, err + } + + mounts, err := daemon.setupMounts(c) + if err != nil { + return nil, err + } + mounts = append(mounts, c.IpcMounts()...) + mounts = append(mounts, c.TmpfsMounts()...) 
+ if err := setMounts(daemon, &s, c, mounts); err != nil { + return nil, fmt.Errorf("linux mounts: %v", err) + } + + //for _, ns := range s.Linux.Namespaces { + // if ns.Type == "network" && ns.Path == "" && !c.Config.NetworkDisabled { + // target, err := os.Readlink(filepath.Join("/proc", strconv.Itoa(os.Getpid()), "exe")) + // if err != nil { + // return nil, err + // } + + // s.Hooks = specs.Hooks{ + // Prestart: []specs.Hook{{ + // Path: target, // FIXME: cross-platform + // Args: []string{"libnetwork-setkey", c.ID}, + // }}, + // } + // } + //} + + if apparmor.IsEnabled() { + appArmorProfile := "docker-default" + if len(c.AppArmorProfile) > 0 { + appArmorProfile = c.AppArmorProfile + } else if c.HostConfig.Privileged { + appArmorProfile = "unconfined" + } + s.Process.ApparmorProfile = appArmorProfile + } + s.Process.SelinuxLabel = c.GetProcessLabel() + s.Process.NoNewPrivileges = c.NoNewPrivileges + s.Linux.MountLabel = c.MountLabel + + return (*libcontainerd.Spec)(&s), nil +} + +func clearReadOnly(m *specs.Mount) { + var opt []string + for _, o := range m.Options { + if o != "ro" { + opt = append(opt, o) + } + } + m.Options = opt +} diff --git a/vendor/github.com/docker/docker/daemon/pause.go b/vendor/github.com/docker/docker/daemon/pause.go new file mode 100644 index 00000000..dbfafbc5 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/pause.go @@ -0,0 +1,49 @@ +package daemon + +import ( + "fmt" + + "github.com/docker/docker/container" +) + +// ContainerPause pauses a container +func (daemon *Daemon) ContainerPause(name string) error { + container, err := daemon.GetContainer(name) + if err != nil { + return err + } + + if err := daemon.containerPause(container); err != nil { + return err + } + + return nil +} + +// containerPause pauses the container execution without stopping the process. +// The execution can be resumed by calling containerUnpause. +func (daemon *Daemon) containerPause(container *container.Container) error { + container.Lock() + defer container.Unlock() + + // We cannot Pause the container which is not running + if !container.Running { + return errNotRunning{container.ID} + } + + // We cannot Pause the container which is already paused + if container.Paused { + return fmt.Errorf("Container %s is already paused", container.ID) + } + + // We cannot Pause the container which is restarting + if container.Restarting { + return errContainerIsRestarting(container.ID) + } + + if err := daemon.containerd.Pause(container.ID); err != nil { + return fmt.Errorf("Cannot pause container %s: %s", container.ID, err) + } + + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/rename.go b/vendor/github.com/docker/docker/daemon/rename.go new file mode 100644 index 00000000..7989ad23 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/rename.go @@ -0,0 +1,65 @@ +package daemon + +import ( + "fmt" + + "github.com/Sirupsen/logrus" +) + +// ContainerRename changes the name of a container, using the oldName +// to find the container. An error is returned if newName is already +// reserved. 
+func (daemon *Daemon) ContainerRename(oldName, newName string) error { + if oldName == "" || newName == "" { + return fmt.Errorf("Neither old nor new names may be empty") + } + + container, err := daemon.GetContainer(oldName) + if err != nil { + return err + } + + oldName = container.Name + + container.Lock() + defer container.Unlock() + if newName, err = daemon.reserveName(container.ID, newName); err != nil { + return fmt.Errorf("Error when allocating new name: %v", err) + } + + container.Name = newName + + defer func() { + if err != nil { + container.Name = oldName + daemon.reserveName(container.ID, oldName) + daemon.releaseName(newName) + } + }() + + daemon.releaseName(oldName) + if err = container.ToDisk(); err != nil { + return err + } + + attributes := map[string]string{ + "oldName": oldName, + } + + if !container.Running { + daemon.LogContainerEventWithAttributes(container, "rename", attributes) + return nil + } + + defer func() { + if err != nil { + container.Name = oldName + if e := container.ToDisk(); e != nil { + logrus.Errorf("%s: Failed in writing to Disk on rename failure: %v", container.ID, e) + } + } + }() + + daemon.LogContainerEventWithAttributes(container, "rename", attributes) + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/resize.go b/vendor/github.com/docker/docker/daemon/resize.go new file mode 100644 index 00000000..74735385 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/resize.go @@ -0,0 +1,40 @@ +package daemon + +import ( + "fmt" + + "github.com/docker/docker/libcontainerd" +) + +// ContainerResize changes the size of the TTY of the process running +// in the container with the given name to the given height and width. +func (daemon *Daemon) ContainerResize(name string, height, width int) error { + container, err := daemon.GetContainer(name) + if err != nil { + return err + } + + if !container.IsRunning() { + return errNotRunning{container.ID} + } + + if err = daemon.containerd.Resize(container.ID, libcontainerd.InitFriendlyName, width, height); err == nil { + attributes := map[string]string{ + "height": fmt.Sprintf("%d", height), + "width": fmt.Sprintf("%d", width), + } + daemon.LogContainerEventWithAttributes(container, "resize", attributes) + } + return err +} + +// ContainerExecResize changes the size of the TTY of the process +// running in the exec with the given name to the given height and +// width. +func (daemon *Daemon) ContainerExecResize(name string, height, width int) error { + ec, err := daemon.getExecConfig(name) + if err != nil { + return err + } + return daemon.containerd.Resize(ec.ContainerID, ec.ID, width, height) +} diff --git a/vendor/github.com/docker/docker/daemon/restart.go b/vendor/github.com/docker/docker/daemon/restart.go new file mode 100644 index 00000000..3779116c --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/restart.go @@ -0,0 +1,48 @@ +package daemon + +import ( + "fmt" + + "github.com/docker/docker/container" +) + +// ContainerRestart stops and starts a container. It attempts to +// gracefully stop the container within the given timeout, forcefully +// stopping it if the timeout is exceeded. If given a negative +// timeout, ContainerRestart will wait forever until a graceful +// stop. Returns an error if the container cannot be found, or if +// there is an underlying error at any stage of the restart. 
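+//
+// For example (name and timeout hypothetical):
+//
+//	err := daemon.ContainerRestart("web", 10) // stop, wait up to 10s, kill if needed, start again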
+func (daemon *Daemon) ContainerRestart(name string, seconds int) error { + container, err := daemon.GetContainer(name) + if err != nil { + return err + } + if err := daemon.containerRestart(container, seconds); err != nil { + return fmt.Errorf("Cannot restart container %s: %v", name, err) + } + return nil +} + +// containerRestart attempts to gracefully stop and then start the +// container. When stopping, wait for the given duration in seconds to +// gracefully stop, before forcefully terminating the container. If +// given a negative duration, wait forever for a graceful stop. +func (daemon *Daemon) containerRestart(container *container.Container, seconds int) error { + // Avoid unnecessarily unmounting and then directly mounting + // the container when the container stops and then starts + // again + if err := daemon.Mount(container); err == nil { + defer daemon.Unmount(container) + } + + if err := daemon.containerStop(container, seconds); err != nil { + return err + } + + if err := daemon.containerStart(container); err != nil { + return err + } + + daemon.LogContainerEvent(container, "restart") + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/seccomp_disabled.go b/vendor/github.com/docker/docker/daemon/seccomp_disabled.go new file mode 100644 index 00000000..620eee29 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/seccomp_disabled.go @@ -0,0 +1,12 @@ +// +build !seccomp,!windows + +package daemon + +import ( + "github.com/docker/docker/container" + "github.com/opencontainers/specs/specs-go" +) + +func setSeccomp(daemon *Daemon, rs *specs.Spec, c *container.Container) error { + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/seccomp_linux.go b/vendor/github.com/docker/docker/daemon/seccomp_linux.go new file mode 100644 index 00000000..659a15de --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/seccomp_linux.go @@ -0,0 +1,46 @@ +// +build linux,seccomp + +package daemon + +import ( + "fmt" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/container" + "github.com/docker/docker/profiles/seccomp" + "github.com/opencontainers/specs/specs-go" +) + +func setSeccomp(daemon *Daemon, rs *specs.Spec, c *container.Container) error { + var profile *specs.Seccomp + var err error + + if c.HostConfig.Privileged { + return nil + } + + if !daemon.seccompEnabled { + if c.SeccompProfile != "" && c.SeccompProfile != "unconfined" { + return fmt.Errorf("Seccomp is not enabled in your kernel, cannot run a custom seccomp profile.") + } + logrus.Warn("Seccomp is not enabled in your kernel, running container without default profile.") + c.SeccompProfile = "unconfined" + } + if c.SeccompProfile == "unconfined" { + return nil + } + if c.SeccompProfile != "" { + profile, err = seccomp.LoadProfile(c.SeccompProfile) + if err != nil { + return err + } + } else { + profile, err = seccomp.GetDefaultProfile() + if err != nil { + return err + } + } + + rs.Linux.Seccomp = profile + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/selinux_linux.go b/vendor/github.com/docker/docker/daemon/selinux_linux.go new file mode 100644 index 00000000..83a34471 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/selinux_linux.go @@ -0,0 +1,17 @@ +// +build linux + +package daemon + +import "github.com/opencontainers/runc/libcontainer/selinux" + +func selinuxSetDisabled() { + selinux.SetDisabled() +} + +func selinuxFreeLxcContexts(label string) { + selinux.FreeLxcContexts(label) +} + +func selinuxEnabled() bool { + return selinux.SelinuxEnabled() 
+}
diff --git a/vendor/github.com/docker/docker/daemon/selinux_unsupported.go b/vendor/github.com/docker/docker/daemon/selinux_unsupported.go
new file mode 100644
index 00000000..25a56ad1
--- /dev/null
+++ b/vendor/github.com/docker/docker/daemon/selinux_unsupported.go
@@ -0,0 +1,13 @@
+// +build !linux
+
+package daemon
+
+func selinuxSetDisabled() {
+}
+
+func selinuxFreeLxcContexts(label string) {
+}
+
+func selinuxEnabled() bool {
+    return false
+}
diff --git a/vendor/github.com/docker/docker/daemon/start.go b/vendor/github.com/docker/docker/daemon/start.go
new file mode 100644
index 00000000..1b34f426
--- /dev/null
+++ b/vendor/github.com/docker/docker/daemon/start.go
@@ -0,0 +1,185 @@
+package daemon
+
+import (
+    "fmt"
+    "net/http"
+    "runtime"
+    "strings"
+    "syscall"
+
+    "github.com/Sirupsen/logrus"
+    "github.com/docker/docker/container"
+    "github.com/docker/docker/errors"
+    "github.com/docker/docker/libcontainerd"
+    "github.com/docker/docker/runconfig"
+    containertypes "github.com/docker/engine-api/types/container"
+)
+
+// ContainerStart starts a container.
+func (daemon *Daemon) ContainerStart(name string, hostConfig *containertypes.HostConfig) error {
+    container, err := daemon.GetContainer(name)
+    if err != nil {
+        return err
+    }
+
+    if container.IsPaused() {
+        return fmt.Errorf("Cannot start a paused container, try unpause instead.")
+    }
+
+    if container.IsRunning() {
+        err := fmt.Errorf("Container already started")
+        return errors.NewErrorWithStatusCode(err, http.StatusNotModified)
+    }
+
+    // Windows does not have the backwards compatibility issue here.
+    if runtime.GOOS != "windows" {
+        // This is kept for backward compatibility - hostconfig should be passed when
+        // creating a container, not during start.
+        if hostConfig != nil {
+            logrus.Warn("DEPRECATED: Setting host configuration options when the container starts is deprecated and will be removed in Docker 1.12")
+            oldNetworkMode := container.HostConfig.NetworkMode
+            if err := daemon.setSecurityOptions(container, hostConfig); err != nil {
+                return err
+            }
+            if err := daemon.setHostConfig(container, hostConfig); err != nil {
+                return err
+            }
+            newNetworkMode := container.HostConfig.NetworkMode
+            if string(oldNetworkMode) != string(newNetworkMode) {
+                // if the user has changed the network mode on start, clean up the
+                // old networks. This is a deprecated feature and will be removed in Docker 1.12
+                container.NetworkSettings.Networks = nil
+                if err := container.ToDisk(); err != nil {
+                    return err
+                }
+            }
+            container.InitDNSHostConfig()
+        }
+    } else {
+        if hostConfig != nil {
+            return fmt.Errorf("Supplying a hostconfig on start is not supported. It should be supplied on create")
+        }
+    }
+
+    // Check whether hostConfig is in line with the current system settings;
+    // it may happen that cgroups are unmounted or the like.
+    if _, err = daemon.verifyContainerSettings(container.HostConfig, nil, false); err != nil {
+        return err
+    }
+    // Adapt for old containers in case we have updates in this function and
+    // old containers never had the chance to call the new function at create time.
+    if err := daemon.adaptContainerSettings(container.HostConfig, false); err != nil {
+        return err
+    }
+
+    return daemon.containerStart(container)
+}
+
+// Start starts a container
+func (daemon *Daemon) Start(container *container.Container) error {
+    return daemon.containerStart(container)
+}
+
+// containerStart prepares the container to run by setting up everything the
+// container needs, such as storage and networking, as well as links
+// between containers. The container is left waiting for a signal to
+// begin running.
+func (daemon *Daemon) containerStart(container *container.Container) (err error) {
+    container.Lock()
+    defer container.Unlock()
+
+    if container.Running {
+        return nil
+    }
+
+    if container.RemovalInProgress || container.Dead {
+        return fmt.Errorf("Container is marked for removal and cannot be started.")
+    }
+
+    // if we encounter an error during start we need to ensure that any other
+    // setup has been cleaned up properly
+    defer func() {
+        if err != nil {
+            container.SetError(err)
+            // if no one else has set it, make sure we don't leave it at zero
+            if container.ExitCode == 0 {
+                container.ExitCode = 128
+            }
+            container.ToDisk()
+            daemon.Cleanup(container)
+            attributes := map[string]string{
+                "exitCode": fmt.Sprintf("%d", container.ExitCode),
+            }
+            daemon.LogContainerEventWithAttributes(container, "die", attributes)
+        }
+    }()
+
+    if err := daemon.conditionalMountOnStart(container); err != nil {
+        return err
+    }
+
+    // Make sure NetworkMode has an acceptable value. We do this to ensure
+    // backwards API compatibility.
+    container.HostConfig = runconfig.SetDefaultNetModeIfBlank(container.HostConfig)
+
+    if err := daemon.initializeNetworking(container); err != nil {
+        return err
+    }
+
+    spec, err := daemon.createSpec(container)
+    if err != nil {
+        return err
+    }
+
+    if err := daemon.containerd.Create(container.ID, *spec, libcontainerd.WithRestartManager(container.RestartManager(true))); err != nil {
+        // if we receive an internal error from the initial start of a container
+        // then let's return it instead of entering the restart loop
+        // set to 127 for container cmd not found/does not exist
+        if strings.Contains(err.Error(), "executable file not found") ||
+            strings.Contains(err.Error(), "no such file or directory") ||
+            strings.Contains(err.Error(), "system cannot find the file specified") {
+            container.ExitCode = 127
+            err = fmt.Errorf("Container command '%s' not found or does not exist.", container.Path)
+        }
+        // set to 126 for container cmd can't be invoked errors
+        if strings.Contains(err.Error(), syscall.EACCES.Error()) {
+            container.ExitCode = 126
+            err = fmt.Errorf("Container command '%s' could not be invoked.", container.Path)
+        }
+
+        container.Reset(false)
+
+        // start event is logged even on error
+        daemon.LogContainerEvent(container, "start")
+        return err
+    }
+
+    return nil
+}
+
+// Cleanup releases any network resources allocated to the container along with any rules
+// around how containers are linked together. It also unmounts the container's root filesystem.
+func (daemon *Daemon) Cleanup(container *container.Container) { + daemon.releaseNetwork(container) + + container.UnmountIpcMounts(detachMounted) + + if err := daemon.conditionalUnmountOnCleanup(container); err != nil { + // FIXME: remove once reference counting for graphdrivers has been refactored + // Ensure that all the mounts are gone + if mountid, err := daemon.layerStore.GetMountID(container.ID); err == nil { + daemon.cleanupMountsByID(mountid) + } + } + + for _, eConfig := range container.ExecCommands.Commands() { + daemon.unregisterExecCommand(container, eConfig) + } + + if container.BaseFS != "" { + if err := container.UnmountVolumes(false, daemon.LogVolumeEvent); err != nil { + logrus.Warnf("%s cleanup: Failed to umount volumes: %v", container.ID, err) + } + } + container.CancelAttachContext() +} diff --git a/vendor/github.com/docker/docker/daemon/stats.go b/vendor/github.com/docker/docker/daemon/stats.go new file mode 100644 index 00000000..cb3478cc --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/stats.go @@ -0,0 +1,121 @@ +package daemon + +import ( + "encoding/json" + "errors" + "runtime" + + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/version" + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/versions/v1p20" +) + +// ContainerStats writes information about the container to the stream +// given in the config object. +func (daemon *Daemon) ContainerStats(prefixOrName string, config *backend.ContainerStatsConfig) error { + if runtime.GOOS == "windows" { + return errors.New("Windows does not support stats") + } + // Remote API version (used for backwards compatibility) + apiVersion := version.Version(config.Version) + + container, err := daemon.GetContainer(prefixOrName) + if err != nil { + return err + } + + // If the container is not running and requires no stream, return an empty stats. 
+ if !container.IsRunning() && !config.Stream { + return json.NewEncoder(config.OutStream).Encode(&types.Stats{}) + } + + outStream := config.OutStream + if config.Stream { + wf := ioutils.NewWriteFlusher(outStream) + defer wf.Close() + wf.Flush() + outStream = wf + } + + var preCPUStats types.CPUStats + getStatJSON := func(v interface{}) *types.StatsJSON { + ss := v.(types.StatsJSON) + ss.PreCPUStats = preCPUStats + // ss.MemoryStats.Limit = uint64(update.MemoryLimit) + preCPUStats = ss.CPUStats + return &ss + } + + enc := json.NewEncoder(outStream) + + updates := daemon.subscribeToContainerStats(container) + defer daemon.unsubscribeToContainerStats(container, updates) + + noStreamFirstFrame := true + for { + select { + case v, ok := <-updates: + if !ok { + return nil + } + + var statsJSON interface{} + statsJSONPost120 := getStatJSON(v) + if apiVersion.LessThan("1.21") { + var ( + rxBytes uint64 + rxPackets uint64 + rxErrors uint64 + rxDropped uint64 + txBytes uint64 + txPackets uint64 + txErrors uint64 + txDropped uint64 + ) + for _, v := range statsJSONPost120.Networks { + rxBytes += v.RxBytes + rxPackets += v.RxPackets + rxErrors += v.RxErrors + rxDropped += v.RxDropped + txBytes += v.TxBytes + txPackets += v.TxPackets + txErrors += v.TxErrors + txDropped += v.TxDropped + } + statsJSON = &v1p20.StatsJSON{ + Stats: statsJSONPost120.Stats, + Network: types.NetworkStats{ + RxBytes: rxBytes, + RxPackets: rxPackets, + RxErrors: rxErrors, + RxDropped: rxDropped, + TxBytes: txBytes, + TxPackets: txPackets, + TxErrors: txErrors, + TxDropped: txDropped, + }, + } + } else { + statsJSON = statsJSONPost120 + } + + if !config.Stream && noStreamFirstFrame { + // prime the cpu stats so they aren't 0 in the final output + noStreamFirstFrame = false + continue + } + + if err := enc.Encode(statsJSON); err != nil { + return err + } + + if !config.Stream { + return nil + } + case <-config.Stop: + return nil + } + } +} diff --git a/vendor/github.com/docker/docker/daemon/stats_collector_unix.go b/vendor/github.com/docker/docker/daemon/stats_collector_unix.go new file mode 100644 index 00000000..1f016322 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/stats_collector_unix.go @@ -0,0 +1,189 @@ +// +build !windows + +package daemon + +import ( + "bufio" + "fmt" + "os" + "strconv" + "strings" + "sync" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/container" + "github.com/docker/docker/pkg/pubsub" + sysinfo "github.com/docker/docker/pkg/system" + "github.com/docker/engine-api/types" + "github.com/opencontainers/runc/libcontainer/system" +) + +type statsSupervisor interface { + // GetContainerStats collects all the stats related to a container + GetContainerStats(container *container.Container) (*types.StatsJSON, error) +} + +// newStatsCollector returns a new statsCollector that collections +// network and cgroup stats for a registered container at the specified +// interval. The collector allows non-running containers to be added +// and will start processing stats when they are started. 
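+//
+// A minimal wiring sketch (c is a *container.Container; the one-second
+// interval is hypothetical):
+//
+//	collector := daemon.newStatsCollector(1 * time.Second)
+//	ch := collector.collect(c)         // subscribe container c
+//	defer collector.unsubscribe(c, ch) // stop receiving updates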
+func (daemon *Daemon) newStatsCollector(interval time.Duration) *statsCollector { + s := &statsCollector{ + interval: interval, + supervisor: daemon, + publishers: make(map[*container.Container]*pubsub.Publisher), + clockTicksPerSecond: uint64(system.GetClockTicks()), + bufReader: bufio.NewReaderSize(nil, 128), + } + meminfo, err := sysinfo.ReadMemInfo() + if err == nil && meminfo.MemTotal > 0 { + s.machineMemory = uint64(meminfo.MemTotal) + } + + go s.run() + return s +} + +// statsCollector manages and provides container resource stats +type statsCollector struct { + m sync.Mutex + supervisor statsSupervisor + interval time.Duration + clockTicksPerSecond uint64 + publishers map[*container.Container]*pubsub.Publisher + bufReader *bufio.Reader + machineMemory uint64 +} + +// collect registers the container with the collector and adds it to +// the event loop for collection on the specified interval returning +// a channel for the subscriber to receive on. +func (s *statsCollector) collect(c *container.Container) chan interface{} { + s.m.Lock() + defer s.m.Unlock() + publisher, exists := s.publishers[c] + if !exists { + publisher = pubsub.NewPublisher(100*time.Millisecond, 1024) + s.publishers[c] = publisher + } + return publisher.Subscribe() +} + +// stopCollection closes the channels for all subscribers and removes +// the container from metrics collection. +func (s *statsCollector) stopCollection(c *container.Container) { + s.m.Lock() + if publisher, exists := s.publishers[c]; exists { + publisher.Close() + delete(s.publishers, c) + } + s.m.Unlock() +} + +// unsubscribe removes a specific subscriber from receiving updates for a container's stats. +func (s *statsCollector) unsubscribe(c *container.Container, ch chan interface{}) { + s.m.Lock() + publisher := s.publishers[c] + if publisher != nil { + publisher.Evict(ch) + if publisher.Len() == 0 { + delete(s.publishers, c) + } + } + s.m.Unlock() +} + +func (s *statsCollector) run() { + type publishersPair struct { + container *container.Container + publisher *pubsub.Publisher + } + // we cannot determine the capacity here. + // it will grow enough in first iteration + var pairs []publishersPair + + for range time.Tick(s.interval) { + // it does not make sense in the first iteration, + // but saves allocations in further iterations + pairs = pairs[:0] + + s.m.Lock() + for container, publisher := range s.publishers { + // copy pointers here to release the lock ASAP + pairs = append(pairs, publishersPair{container, publisher}) + } + s.m.Unlock() + if len(pairs) == 0 { + continue + } + + systemUsage, err := s.getSystemCPUUsage() + if err != nil { + logrus.Errorf("collecting system cpu usage: %v", err) + continue + } + + for _, pair := range pairs { + stats, err := s.supervisor.GetContainerStats(pair.container) + if err != nil { + if _, ok := err.(errNotRunning); !ok { + logrus.Errorf("collecting stats for %s: %v", pair.container.ID, err) + } + continue + } + // FIXME: move to containerd + stats.CPUStats.SystemUsage = systemUsage + + pair.publisher.Publish(*stats) + } + } +} + +const nanoSecondsPerSecond = 1e9 + +// getSystemCPUUsage returns the host system's cpu usage in +// nanoseconds. An error is returned if the format of the underlying +// file does not match. +// +// Uses /proc/stat defined by POSIX. Looks for the cpu +// statistics line and then sums up the first seven fields +// provided. See `man 5 proc` for details on specific field +// information. 
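+//
+// For example, given the (hypothetical) line
+//
+//	cpu  10132153 290696 3084719 46828483 16683 0 25195 0
+//
+// the first seven value fields sum to 60377929 clock ticks, which is
+// returned as 60377929 * 1e9 / clockTicksPerSecond nanoseconds (about
+// 603779 seconds of total CPU time at the usual 100 ticks per second).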
+func (s *statsCollector) getSystemCPUUsage() (uint64, error) {
+	var line string
+	f, err := os.Open("/proc/stat")
+	if err != nil {
+		return 0, err
+	}
+	defer func() {
+		s.bufReader.Reset(nil)
+		f.Close()
+	}()
+	s.bufReader.Reset(f)
+	err = nil
+	for err == nil {
+		line, err = s.bufReader.ReadString('\n')
+		if err != nil {
+			break
+		}
+		parts := strings.Fields(line)
+		switch parts[0] {
+		case "cpu":
+			if len(parts) < 8 {
+				return 0, fmt.Errorf("invalid number of cpu fields")
+			}
+			var totalClockTicks uint64
+			for _, i := range parts[1:8] {
+				v, err := strconv.ParseUint(i, 10, 64)
+				if err != nil {
+					return 0, fmt.Errorf("Unable to convert value %s to uint64: %s", i, err)
+				}
+				totalClockTicks += v
+			}
+			return (totalClockTicks * nanoSecondsPerSecond) /
+				s.clockTicksPerSecond, nil
+		}
+	}
+	return 0, fmt.Errorf("invalid stat format: could not parse the 'cpu' line in '/proc/stat'")
+}
diff --git a/vendor/github.com/docker/docker/daemon/stop.go b/vendor/github.com/docker/docker/daemon/stop.go
new file mode 100644
index 00000000..70174300
--- /dev/null
+++ b/vendor/github.com/docker/docker/daemon/stop.go
@@ -0,0 +1,65 @@
+package daemon
+
+import (
+	"fmt"
+	"net/http"
+	"time"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/container"
+	"github.com/docker/docker/errors"
+)
+
+// ContainerStop looks for the given container and terminates it,
+// waiting the given number of seconds before forcefully killing the
+// container. If a negative number of seconds is given, ContainerStop
+// will wait for a graceful termination. An error is returned if the
+// container is not found, is already stopped, or if there is a
+// problem stopping the container.
+func (daemon *Daemon) ContainerStop(name string, seconds int) error {
+	container, err := daemon.GetContainer(name)
+	if err != nil {
+		return err
+	}
+	if !container.IsRunning() {
+		err := fmt.Errorf("Container %s is already stopped", name)
+		return errors.NewErrorWithStatusCode(err, http.StatusNotModified)
+	}
+	if err := daemon.containerStop(container, seconds); err != nil {
+		return fmt.Errorf("Cannot stop container %s: %v", name, err)
+	}
+	return nil
+}
+
+// containerStop halts a container by sending a stop signal, waiting for the
+// given duration in seconds, and then sending SIGKILL and waiting for the
+// process to exit. If a negative duration is given, containerStop will wait
+// for the initial signal forever. If the container is not running, it returns
+// immediately.
+func (daemon *Daemon) containerStop(container *container.Container, seconds int) error {
+	if !container.IsRunning() {
+		return nil
+	}
+
+	stopSignal := container.StopSignal()
+	// 1. Send a stop signal
+	if err := daemon.killPossiblyDeadProcess(container, stopSignal); err != nil {
+		logrus.Infof("Failed to send signal %d to the process, force killing", stopSignal)
+		if err := daemon.killPossiblyDeadProcess(container, 9); err != nil {
+			return err
+		}
+	}
+
+	// 2. Wait for the process to exit on its own
+	if _, err := container.WaitStop(time.Duration(seconds) * time.Second); err != nil {
+		logrus.Infof("Container %v failed to exit within %d seconds of signal %d - using the force", container.ID, seconds, stopSignal)
+		// 3. If it doesn't, then send SIGKILL
+		if err := daemon.Kill(container); err != nil {
+			container.WaitStop(-1 * time.Second)
+			logrus.Warn(err) // Don't return error because we only care that container is stopped, not what function stopped it
+		}
+	}
+
+	daemon.LogContainerEvent(container, "stop")
+	return nil
+}
diff --git a/vendor/github.com/docker/docker/daemon/top_unix.go b/vendor/github.com/docker/docker/daemon/top_unix.go
new file mode 100644
index 00000000..5afa6024
--- /dev/null
+++ b/vendor/github.com/docker/docker/daemon/top_unix.go
@@ -0,0 +1,85 @@
+// +build !windows
+
+package daemon
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+
+	"github.com/docker/containerd/subreaper/exec"
+	"github.com/docker/engine-api/types"
+)
+
+// ContainerTop lists the processes running inside the given
+// container by calling ps with the given args, or with the flags
+// "-ef" if no args are given. An error is returned if the container
+// is not found, or is not running, or if there are any problems
+// running ps, or parsing the output.
+func (daemon *Daemon) ContainerTop(name string, psArgs string) (*types.ContainerProcessList, error) {
+	if psArgs == "" {
+		psArgs = "-ef"
+	}
+
+	container, err := daemon.GetContainer(name)
+	if err != nil {
+		return nil, err
+	}
+
+	if !container.IsRunning() {
+		return nil, errNotRunning{container.ID}
+	}
+
+	if container.IsRestarting() {
+		return nil, errContainerIsRestarting(container.ID)
+	}
+
+	pids, err := daemon.containerd.GetPidsForContainer(container.ID)
+	if err != nil {
+		return nil, err
+	}
+
+	output, err := exec.Command("ps", strings.Split(psArgs, " ")...).Output()
+	if err != nil {
+		return nil, fmt.Errorf("Error running ps: %v", err)
+	}
+
+	procList := &types.ContainerProcessList{}
+
+	lines := strings.Split(string(output), "\n")
+	procList.Titles = strings.Fields(lines[0])
+
+	pidIndex := -1
+	for i, name := range procList.Titles {
+		if name == "PID" {
+			pidIndex = i
+		}
+	}
+	if pidIndex == -1 {
+		return nil, fmt.Errorf("Couldn't find PID field in ps output")
+	}
+
+	// loop through the output and extract the PID from each line
+	for _, line := range lines[1:] {
+		if len(line) == 0 {
+			continue
+		}
+		fields := strings.Fields(line)
+		p, err := strconv.Atoi(fields[pidIndex])
+		if err != nil {
+			return nil, fmt.Errorf("Unexpected pid '%s': %s", fields[pidIndex], err)
+		}
+
+		for _, pid := range pids {
+			if pid == p {
+				// Make sure the number of fields equals the number of
+				// header titles by merging "overhanging" fields
+				process := fields[:len(procList.Titles)-1]
+				process = append(process, strings.Join(fields[len(procList.Titles)-1:], " "))
+				procList.Processes = append(procList.Processes, process)
+			}
+		}
+	}
+	daemon.LogContainerEvent(container, "top")
+	return procList, nil
+}
diff --git a/vendor/github.com/docker/docker/daemon/unpause.go b/vendor/github.com/docker/docker/daemon/unpause.go
new file mode 100644
index 00000000..c1ab74b0
--- /dev/null
+++ b/vendor/github.com/docker/docker/daemon/unpause.go
@@ -0,0 +1,43 @@
+package daemon
+
+import (
+	"fmt"
+
+	"github.com/docker/docker/container"
+)
+
+// ContainerUnpause unpauses a container
+func (daemon *Daemon) ContainerUnpause(name string) error {
+	container, err := daemon.GetContainer(name)
+	if err != nil {
+		return err
+	}
+
+	if err := daemon.containerUnpause(container); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// containerUnpause resumes the container execution after the container is paused.
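+//
+// Illustrative behavior sketch (editorial note, not upstream):
+//
+//	err := daemon.containerUnpause(c) // c running and paused -> nil, resumed
+//	err = daemon.containerUnpause(c)  // no longer paused -> "Container ... is not paused"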
+func (daemon *Daemon) containerUnpause(container *container.Container) error {
+	container.Lock()
+	defer container.Unlock()
+
+	// We cannot unpause a container that is not running
+	if !container.Running {
+		return errNotRunning{container.ID}
+	}
+
+	// We cannot unpause a container that is not paused
+	if !container.Paused {
+		return fmt.Errorf("Container %s is not paused", container.ID)
+	}
+
+	if err := daemon.containerd.Resume(container.ID); err != nil {
+		return fmt.Errorf("Cannot unpause container %s: %s", container.ID, err)
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/docker/docker/daemon/update.go b/vendor/github.com/docker/docker/daemon/update.go
new file mode 100644
index 00000000..fee470a3
--- /dev/null
+++ b/vendor/github.com/docker/docker/daemon/update.go
@@ -0,0 +1,100 @@
+package daemon
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/docker/engine-api/types/container"
+)
+
+// ContainerUpdate updates the configuration of a container
+func (daemon *Daemon) ContainerUpdate(name string, hostConfig *container.HostConfig) ([]string, error) {
+	var warnings []string
+
+	warnings, err := daemon.verifyContainerSettings(hostConfig, nil, true)
+	if err != nil {
+		return warnings, err
+	}
+
+	if err := daemon.update(name, hostConfig); err != nil {
+		return warnings, err
+	}
+
+	return warnings, nil
+}
+
+// ContainerUpdateCmdOnBuild updates Path and Args for the container with ID cID.
+func (daemon *Daemon) ContainerUpdateCmdOnBuild(cID string, cmd []string) error {
+	if len(cmd) == 0 {
+		return nil
+	}
+	c, err := daemon.GetContainer(cID)
+	if err != nil {
+		return err
+	}
+	c.Path = cmd[0]
+	c.Args = cmd[1:]
+	return nil
+}
+
+func (daemon *Daemon) update(name string, hostConfig *container.HostConfig) error {
+	if hostConfig == nil {
+		return nil
+	}
+
+	container, err := daemon.GetContainer(name)
+	if err != nil {
+		return err
+	}
+
+	restoreConfig := false
+	backupHostConfig := *container.HostConfig
+	defer func() {
+		if restoreConfig {
+			container.Lock()
+			container.HostConfig = &backupHostConfig
+			container.ToDisk()
+			container.Unlock()
+		}
+	}()
+
+	if container.RemovalInProgress || container.Dead {
+		return errCannotUpdate(container.ID, fmt.Errorf("Container is marked for removal and cannot be updated."))
+	}
+
+	if container.IsRunning() && hostConfig.KernelMemory != 0 {
+		return errCannotUpdate(container.ID, fmt.Errorf("Cannot update kernel memory of a running container; stop it first."))
+	}
+
+	if err := container.UpdateContainer(hostConfig); err != nil {
+		restoreConfig = true
+		return errCannotUpdate(container.ID, err)
+	}
+
+	// if the restart policy changed, we need to update the container monitor
+	container.UpdateMonitor(hostConfig.RestartPolicy)
+
+	// if the container is restarting, wait up to 5 seconds until it is running
+	if container.IsRestarting() {
+		container.WaitRunning(5 * time.Second)
+	}
+
+	// If the container is not running, updating the hostConfig struct is
+	// enough; the resources will be applied when the container is started
+	// again. If the container is running (including paused), the updated
+	// configs need to be applied to the live container.
+ if container.IsRunning() && !container.IsRestarting() { + if err := daemon.containerd.UpdateResources(container.ID, toContainerdResources(hostConfig.Resources)); err != nil { + restoreConfig = true + return errCannotUpdate(container.ID, err) + } + } + + daemon.LogContainerEvent(container, "update") + + return nil +} + +func errCannotUpdate(containerID string, err error) error { + return fmt.Errorf("Cannot update container %s: %v", containerID, err) +} diff --git a/vendor/github.com/docker/docker/daemon/update_linux.go b/vendor/github.com/docker/docker/daemon/update_linux.go new file mode 100644 index 00000000..97ba7c09 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/update_linux.go @@ -0,0 +1,25 @@ +// +build linux + +package daemon + +import ( + "github.com/docker/docker/libcontainerd" + "github.com/docker/engine-api/types/container" +) + +func toContainerdResources(resources container.Resources) libcontainerd.Resources { + var r libcontainerd.Resources + r.BlkioWeight = uint32(resources.BlkioWeight) + r.CpuShares = uint32(resources.CPUShares) + r.CpuPeriod = uint32(resources.CPUPeriod) + r.CpuQuota = uint32(resources.CPUQuota) + r.CpusetCpus = resources.CpusetCpus + r.CpusetMems = resources.CpusetMems + r.MemoryLimit = uint32(resources.Memory) + if resources.MemorySwap > 0 { + r.MemorySwap = uint32(resources.MemorySwap) + } + r.MemoryReservation = uint32(resources.MemoryReservation) + r.KernelMemoryLimit = uint32(resources.KernelMemory) + return r +} diff --git a/vendor/github.com/docker/docker/daemon/volumes.go b/vendor/github.com/docker/docker/daemon/volumes.go new file mode 100644 index 00000000..37f4e7fa --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/volumes.go @@ -0,0 +1,178 @@ +package daemon + +import ( + "errors" + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/docker/docker/container" + "github.com/docker/docker/volume" + "github.com/docker/engine-api/types" + containertypes "github.com/docker/engine-api/types/container" + "github.com/opencontainers/runc/libcontainer/label" +) + +var ( + // ErrVolumeReadonly is used to signal an error when trying to copy data into + // a volume mount that is not writable. + ErrVolumeReadonly = errors.New("mounted volume is marked read-only") +) + +type mounts []container.Mount + +// volumeToAPIType converts a volume.Volume to the type used by the remote API +func volumeToAPIType(v volume.Volume) *types.Volume { + tv := &types.Volume{ + Name: v.Name(), + Driver: v.DriverName(), + Mountpoint: v.Path(), + } + if v, ok := v.(interface { + Labels() map[string]string + }); ok { + tv.Labels = v.Labels() + } + return tv +} + +// Len returns the number of mounts. Used in sorting. +func (m mounts) Len() int { + return len(m) +} + +// Less returns true if the number of parts (a/b/c would be 3 parts) in the +// mount indexed by parameter 1 is less than that of the mount indexed by +// parameter 2. Used in sorting. +func (m mounts) Less(i, j int) bool { + return m.parts(i) < m.parts(j) +} + +// Swap swaps two items in an array of mounts. Used in sorting +func (m mounts) Swap(i, j int) { + m[i], m[j] = m[j], m[i] +} + +// parts returns the number of parts in the destination of a mount. Used in sorting. +func (m mounts) parts(i int) int { + return strings.Count(filepath.Clean(m[i].Destination), string(os.PathSeparator)) +} + +// registerMountPoints initializes the container mount points with the configured volumes and bind mounts. +// It follows the next sequence to decide what to mount in each final destination: +// +// 1. 
Select the previously configured mount points for the container, if any.
+// 2. Select the volumes mounted from other containers. Overrides previously configured mount point destinations.
+// 3. Select the bind mounts set by the client. Overrides previously configured mount point destinations.
+// 4. Cleanup old volumes that are about to be reassigned.
+func (daemon *Daemon) registerMountPoints(container *container.Container, hostConfig *containertypes.HostConfig) error {
+	binds := map[string]bool{}
+	mountPoints := map[string]*volume.MountPoint{}
+
+	// 1. Read already configured mount points.
+	for name, point := range container.MountPoints {
+		mountPoints[name] = point
+	}
+
+	// 2. Read volumes from other containers.
+	for _, v := range hostConfig.VolumesFrom {
+		containerID, mode, err := volume.ParseVolumesFrom(v)
+		if err != nil {
+			return err
+		}
+
+		c, err := daemon.GetContainer(containerID)
+		if err != nil {
+			return err
+		}
+
+		for _, m := range c.MountPoints {
+			cp := &volume.MountPoint{
+				Name:        m.Name,
+				Source:      m.Source,
+				RW:          m.RW && volume.ReadWrite(mode),
+				Driver:      m.Driver,
+				Destination: m.Destination,
+				Propagation: m.Propagation,
+				Named:       m.Named,
+			}
+
+			if len(cp.Source) == 0 {
+				v, err := daemon.volumes.GetWithRef(cp.Name, cp.Driver, container.ID)
+				if err != nil {
+					return err
+				}
+				cp.Volume = v
+			}
+
+			mountPoints[cp.Destination] = cp
+		}
+	}
+
+	// 3. Read bind mounts
+	for _, b := range hostConfig.Binds {
+		// #10618
+		bind, err := volume.ParseMountSpec(b, hostConfig.VolumeDriver)
+		if err != nil {
+			return err
+		}
+
+		if binds[bind.Destination] {
+			return fmt.Errorf("Duplicate mount point '%s'", bind.Destination)
+		}
+
+		if len(bind.Name) > 0 {
+			// create the volume
+			v, err := daemon.volumes.CreateWithRef(bind.Name, bind.Driver, container.ID, nil, nil)
+			if err != nil {
+				return err
+			}
+			bind.Volume = v
+			bind.Source = v.Path()
+			// bind.Name is an already existing volume, we need to use that here
+			bind.Driver = v.DriverName()
+			bind.Named = true
+			if bind.Driver == "local" {
+				bind = setBindModeIfNull(bind)
+			}
+		}
+
+		if label.RelabelNeeded(bind.Mode) {
+			if err := label.Relabel(bind.Source, container.MountLabel, label.IsShared(bind.Mode)); err != nil {
+				return err
+			}
+		}
+		binds[bind.Destination] = true
+		mountPoints[bind.Destination] = bind
+	}
+
+	container.Lock()
+
+	// 4. Cleanup old volumes that are about to be reassigned.
+	for _, m := range mountPoints {
+		if m.BackwardsCompatible() {
+			if mp, exists := container.MountPoints[m.Destination]; exists && mp.Volume != nil {
+				daemon.volumes.Dereference(mp.Volume, container.ID)
+			}
+		}
+	}
+	container.MountPoints = mountPoints
+
+	container.Unlock()
+
+	return nil
+}
+
+// lazyInitializeVolume initializes a mountpoint's volume if needed.
+// This happens after a daemon restart.
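+//
+// An illustrative call site (editorial sketch; setupMounts in
+// volumes_unix.go below does exactly this):
+//
+//	for _, m := range c.MountPoints {
+//		if err := daemon.lazyInitializeVolume(c.ID, m); err != nil {
+//			return nil, err
+//		}
+//	}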
+func (daemon *Daemon) lazyInitializeVolume(containerID string, m *volume.MountPoint) error {
+	if len(m.Driver) > 0 && m.Volume == nil {
+		v, err := daemon.volumes.GetWithRef(m.Name, m.Driver, containerID)
+		if err != nil {
+			return err
+		}
+		m.Volume = v
+	}
+	return nil
+}
diff --git a/vendor/github.com/docker/docker/daemon/volumes_unix.go b/vendor/github.com/docker/docker/daemon/volumes_unix.go
new file mode 100644
index 00000000..078fd10b
--- /dev/null
+++ b/vendor/github.com/docker/docker/daemon/volumes_unix.go
@@ -0,0 +1,78 @@
+// +build !windows
+
+package daemon
+
+import (
+	"os"
+	"sort"
+	"strconv"
+
+	"github.com/docker/docker/container"
+	"github.com/docker/docker/volume"
+)
+
+// setupMounts iterates through each of the mount points for a container and
+// calls Setup() on each. It also looks to see if it is a network mount such as
+// /etc/resolv.conf and, if it is not, appends it to the array of mounts.
+func (daemon *Daemon) setupMounts(c *container.Container) ([]container.Mount, error) {
+	var mounts []container.Mount
+	for _, m := range c.MountPoints {
+		if err := daemon.lazyInitializeVolume(c.ID, m); err != nil {
+			return nil, err
+		}
+		path, err := m.Setup()
+		if err != nil {
+			return nil, err
+		}
+		if !c.TrySetNetworkMount(m.Destination, path) {
+			mnt := container.Mount{
+				Source:      path,
+				Destination: m.Destination,
+				Writable:    m.RW,
+				Propagation: m.Propagation,
+			}
+			if m.Volume != nil {
+				attributes := map[string]string{
+					"driver":      m.Volume.DriverName(),
+					"container":   c.ID,
+					"destination": m.Destination,
+					"read/write":  strconv.FormatBool(m.RW),
+					"propagation": m.Propagation,
+				}
+				daemon.LogVolumeEvent(m.Volume.Name(), "mount", attributes)
+			}
+			mounts = append(mounts, mnt)
+		}
+	}
+
+	mounts = sortMounts(mounts)
+	netMounts := c.NetworkMounts()
+	// if we are going to mount any of the network files from container
+	// metadata, the ownership must be set properly for potential container
+	// remapped root (user namespaces)
+	rootUID, rootGID := daemon.GetRemappedUIDGID()
+	for _, mount := range netMounts {
+		if err := os.Chown(mount.Source, rootUID, rootGID); err != nil {
+			return nil, err
+		}
+	}
+	return append(mounts, netMounts...), nil
+}
+
+// sortMounts sorts an array of mounts by the number of path components in
+// their destinations. This ensures that when mounting, the mounts don't
+// shadow other mounts. For example, if mounting /etc and /etc/resolv.conf,
+// /etc/resolv.conf must not be mounted first.
+func sortMounts(m []container.Mount) []container.Mount {
+	sort.Sort(mounts(m))
+	return m
+}
+
+// setBindModeIfNull is platform-specific processing to ensure the
+// shared mode is set to 'z' if it is null. This is called in the case
+// of processing a named volume and not a typical bind.
+func setBindModeIfNull(bind *volume.MountPoint) *volume.MountPoint {
+	if bind.Mode == "" {
+		bind.Mode = "z"
+	}
+	return bind
+}
diff --git a/vendor/github.com/docker/docker/daemon/wait.go b/vendor/github.com/docker/docker/daemon/wait.go
new file mode 100644
index 00000000..52b335cd
--- /dev/null
+++ b/vendor/github.com/docker/docker/daemon/wait.go
@@ -0,0 +1,17 @@
+package daemon
+
+import "time"
+
+// ContainerWait stops processing until the given container is
+// stopped. If the container is not found, an error is returned. On a
+// successful stop, the exit code of the container is returned. On a
+// timeout, an error is returned. If you want to wait forever, supply
+// a negative duration for the timeout.
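+//
+// Illustrative usage (editorial sketch; name and timeout hypothetical):
+//
+//	exitCode, err := daemon.ContainerWait("web", 10*time.Second)
+//	if err != nil {
+//		// container not found, or it did not stop within 10 seconds
+//	}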
+func (daemon *Daemon) ContainerWait(name string, timeout time.Duration) (int, error) { + container, err := daemon.GetContainer(name) + if err != nil { + return -1, err + } + + return container.WaitStop(timeout) +} diff --git a/vendor/github.com/docker/docker/distribution/errors.go b/vendor/github.com/docker/docker/distribution/errors.go new file mode 100644 index 00000000..f7a9c621 --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/errors.go @@ -0,0 +1,113 @@ +package distribution + +import ( + "net/url" + "strings" + "syscall" + + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/distribution/registry/api/v2" + "github.com/docker/distribution/registry/client" + "github.com/docker/distribution/registry/client/auth" + "github.com/docker/docker/distribution/xfer" +) + +// ErrNoSupport is an error type used for errors indicating that an operation +// is not supported. It encapsulates a more specific error. +type ErrNoSupport struct{ Err error } + +func (e ErrNoSupport) Error() string { + if e.Err == nil { + return "not supported" + } + return e.Err.Error() +} + +// fallbackError wraps an error that can possibly allow fallback to a different +// endpoint. +type fallbackError struct { + // err is the error being wrapped. + err error + // confirmedV2 is set to true if it was confirmed that the registry + // supports the v2 protocol. This is used to limit fallbacks to the v1 + // protocol. + confirmedV2 bool + // transportOK is set to true if we managed to speak HTTP with the + // registry. This confirms that we're using appropriate TLS settings + // (or lack of TLS). + transportOK bool +} + +// Error renders the FallbackError as a string. +func (f fallbackError) Error() string { + return f.err.Error() +} + +// shouldV2Fallback returns true if this error is a reason to fall back to v1. +func shouldV2Fallback(err errcode.Error) bool { + switch err.Code { + case errcode.ErrorCodeUnauthorized, v2.ErrorCodeManifestUnknown, v2.ErrorCodeNameUnknown: + return true + } + return false +} + +// continueOnError returns true if we should fallback to the next endpoint +// as a result of this error. +func continueOnError(err error) bool { + switch v := err.(type) { + case errcode.Errors: + if len(v) == 0 { + return true + } + return continueOnError(v[0]) + case ErrNoSupport: + return continueOnError(v.Err) + case errcode.Error: + return shouldV2Fallback(v) + case *client.UnexpectedHTTPResponseError: + return true + case ImageConfigPullError: + return false + case error: + return !strings.Contains(err.Error(), strings.ToLower(syscall.ENOSPC.Error())) + } + // let's be nice and fallback if the error is a completely + // unexpected one. + // If new errors have to be handled in some way, please + // add them to the switch above. + return true +} + +// retryOnError wraps the error in xfer.DoNotRetry if we should not retry the +// operation after this error. 
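+//
+// Illustrative examples (editorial note, not upstream):
+//
+//	retryOnError(errcode.Error{Code: errcode.ErrorCodeUnauthorized}) // wrapped in xfer.DoNotRetry
+//	retryOnError(io.ErrUnexpectedEOF)                                // returned as-is, may be retried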
+func retryOnError(err error) error {
+	switch v := err.(type) {
+	case errcode.Errors:
+		if len(v) != 0 {
+			return retryOnError(v[0])
+		}
+	case errcode.Error:
+		switch v.Code {
+		case errcode.ErrorCodeUnauthorized, errcode.ErrorCodeUnsupported, errcode.ErrorCodeDenied:
+			return xfer.DoNotRetry{Err: err}
+		}
+	case *url.Error:
+		switch v.Err {
+		case auth.ErrNoBasicAuthCredentials, auth.ErrNoToken:
+			return xfer.DoNotRetry{Err: v.Err}
+		}
+		return retryOnError(v.Err)
+	case *client.UnexpectedHTTPResponseError:
+		return xfer.DoNotRetry{Err: err}
+	case error:
+		if strings.Contains(err.Error(), strings.ToLower(syscall.ENOSPC.Error())) {
+			return xfer.DoNotRetry{Err: err}
+		}
+	}
+	// Any other error is returned unchanged, so the operation may be
+	// retried. If new errors need special handling, please add them to
+	// the switch above.
+	return err
+}
diff --git a/vendor/github.com/docker/docker/distribution/metadata/metadata.go b/vendor/github.com/docker/docker/distribution/metadata/metadata.go
new file mode 100644
index 00000000..9f744d46
--- /dev/null
+++ b/vendor/github.com/docker/docker/distribution/metadata/metadata.go
@@ -0,0 +1,77 @@
+package metadata
+
+import (
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"sync"
+)
+
+// Store implements a K/V store for mapping distribution-related IDs
+// to on-disk layer IDs and image IDs. The namespace identifies the type of
+// mapping (e.g. "v1id" or "v2metadata-by-diffid"). Implementations of Store
+// must be goroutine-safe.
+type Store interface {
+	// Get retrieves data by namespace and key.
+	Get(namespace string, key string) ([]byte, error)
+	// Set writes data indexed by namespace and key.
+	Set(namespace, key string, value []byte) error
+	// Delete removes data indexed by namespace and key.
+	Delete(namespace, key string) error
+}
+
+// FSMetadataStore uses the filesystem to associate metadata with layer and
+// image IDs.
+type FSMetadataStore struct {
+	sync.RWMutex
+	basePath string
+}
+
+// NewFSMetadataStore creates a new filesystem-based metadata store.
+func NewFSMetadataStore(basePath string) (*FSMetadataStore, error) {
+	if err := os.MkdirAll(basePath, 0700); err != nil {
+		return nil, err
+	}
+	return &FSMetadataStore{
+		basePath: basePath,
+	}, nil
+}
+
+func (store *FSMetadataStore) path(namespace, key string) string {
+	return filepath.Join(store.basePath, namespace, key)
+}
+
+// Get retrieves data by namespace and key. The data is read from a file named
+// after the key, stored in the namespace's directory.
+func (store *FSMetadataStore) Get(namespace string, key string) ([]byte, error) {
+	store.RLock()
+	defer store.RUnlock()
+
+	return ioutil.ReadFile(store.path(namespace, key))
+}
+
+// Set writes data indexed by namespace and key. The data is written to a file
+// named after the key, stored in the namespace's directory.
+func (store *FSMetadataStore) Set(namespace, key string, value []byte) error {
+	store.Lock()
+	defer store.Unlock()
+
+	path := store.path(namespace, key)
+	tempFilePath := path + ".tmp"
+	if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
+		return err
+	}
+	if err := ioutil.WriteFile(tempFilePath, value, 0644); err != nil {
+		return err
+	}
+	return os.Rename(tempFilePath, path)
+}
+
+// Delete removes data indexed by namespace and key. The data file named after
+// the key, stored in the namespace's directory, is deleted.
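+//
+// Illustrative on-disk layout (editorial sketch; the base path and key are
+// hypothetical):
+//
+//	store, _ := NewFSMetadataStore("/var/lib/docker/image/distribution")
+//	store.Set("v1id", "registry-1.docker.io,abc123", data)
+//	// writes /var/lib/docker/image/distribution/v1id/registry-1.docker.io,abc123
+//	store.Delete("v1id", "registry-1.docker.io,abc123")
+//	// removes that file again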
+func (store *FSMetadataStore) Delete(namespace, key string) error { + store.Lock() + defer store.Unlock() + + path := store.path(namespace, key) + return os.Remove(path) +} diff --git a/vendor/github.com/docker/docker/distribution/metadata/v1_id_service.go b/vendor/github.com/docker/docker/distribution/metadata/v1_id_service.go new file mode 100644 index 00000000..f6e45892 --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/metadata/v1_id_service.go @@ -0,0 +1,44 @@ +package metadata + +import ( + "github.com/docker/docker/image/v1" + "github.com/docker/docker/layer" +) + +// V1IDService maps v1 IDs to layers on disk. +type V1IDService struct { + store Store +} + +// NewV1IDService creates a new V1 ID mapping service. +func NewV1IDService(store Store) *V1IDService { + return &V1IDService{ + store: store, + } +} + +// namespace returns the namespace used by this service. +func (idserv *V1IDService) namespace() string { + return "v1id" +} + +// Get finds a layer by its V1 ID. +func (idserv *V1IDService) Get(v1ID, registry string) (layer.DiffID, error) { + if err := v1.ValidateID(v1ID); err != nil { + return layer.DiffID(""), err + } + + idBytes, err := idserv.store.Get(idserv.namespace(), registry+","+v1ID) + if err != nil { + return layer.DiffID(""), err + } + return layer.DiffID(idBytes), nil +} + +// Set associates an image with a V1 ID. +func (idserv *V1IDService) Set(v1ID, registry string, id layer.DiffID) error { + if err := v1.ValidateID(v1ID); err != nil { + return err + } + return idserv.store.Set(idserv.namespace(), registry+","+v1ID, []byte(id)) +} diff --git a/vendor/github.com/docker/docker/distribution/metadata/v2_metadata_service.go b/vendor/github.com/docker/docker/distribution/metadata/v2_metadata_service.go new file mode 100644 index 00000000..239cd1f4 --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/metadata/v2_metadata_service.go @@ -0,0 +1,137 @@ +package metadata + +import ( + "encoding/json" + + "github.com/docker/distribution/digest" + "github.com/docker/docker/layer" +) + +// V2MetadataService maps layer IDs to a set of known metadata for +// the layer. +type V2MetadataService struct { + store Store +} + +// V2Metadata contains the digest and source repository information for a layer. +type V2Metadata struct { + Digest digest.Digest + SourceRepository string +} + +// maxMetadata is the number of metadata entries to keep per layer DiffID. +const maxMetadata = 50 + +// NewV2MetadataService creates a new diff ID to v2 metadata mapping service. +func NewV2MetadataService(store Store) *V2MetadataService { + return &V2MetadataService{ + store: store, + } +} + +func (serv *V2MetadataService) diffIDNamespace() string { + return "v2metadata-by-diffid" +} + +func (serv *V2MetadataService) digestNamespace() string { + return "diffid-by-digest" +} + +func (serv *V2MetadataService) diffIDKey(diffID layer.DiffID) string { + return string(digest.Digest(diffID).Algorithm()) + "/" + digest.Digest(diffID).Hex() +} + +func (serv *V2MetadataService) digestKey(dgst digest.Digest) string { + return string(dgst.Algorithm()) + "/" + dgst.Hex() +} + +// GetMetadata finds the metadata associated with a layer DiffID. 
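+//
+// Illustrative round-trip (editorial sketch; values hypothetical):
+//
+//	serv.Add(diffID, V2Metadata{Digest: dgst, SourceRepository: "docker.io/library/busybox"})
+//	meta, _ := serv.GetMetadata(diffID) // includes the entry added above
+//	back, _ := serv.GetDiffID(dgst)     // back == diffID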
+func (serv *V2MetadataService) GetMetadata(diffID layer.DiffID) ([]V2Metadata, error) { + jsonBytes, err := serv.store.Get(serv.diffIDNamespace(), serv.diffIDKey(diffID)) + if err != nil { + return nil, err + } + + var metadata []V2Metadata + if err := json.Unmarshal(jsonBytes, &metadata); err != nil { + return nil, err + } + + return metadata, nil +} + +// GetDiffID finds a layer DiffID from a digest. +func (serv *V2MetadataService) GetDiffID(dgst digest.Digest) (layer.DiffID, error) { + diffIDBytes, err := serv.store.Get(serv.digestNamespace(), serv.digestKey(dgst)) + if err != nil { + return layer.DiffID(""), err + } + + return layer.DiffID(diffIDBytes), nil +} + +// Add associates metadata with a layer DiffID. If too many metadata entries are +// present, the oldest one is dropped. +func (serv *V2MetadataService) Add(diffID layer.DiffID, metadata V2Metadata) error { + oldMetadata, err := serv.GetMetadata(diffID) + if err != nil { + oldMetadata = nil + } + newMetadata := make([]V2Metadata, 0, len(oldMetadata)+1) + + // Copy all other metadata to new slice + for _, oldMeta := range oldMetadata { + if oldMeta != metadata { + newMetadata = append(newMetadata, oldMeta) + } + } + + newMetadata = append(newMetadata, metadata) + + if len(newMetadata) > maxMetadata { + newMetadata = newMetadata[len(newMetadata)-maxMetadata:] + } + + jsonBytes, err := json.Marshal(newMetadata) + if err != nil { + return err + } + + err = serv.store.Set(serv.diffIDNamespace(), serv.diffIDKey(diffID), jsonBytes) + if err != nil { + return err + } + + return serv.store.Set(serv.digestNamespace(), serv.digestKey(metadata.Digest), []byte(diffID)) +} + +// Remove unassociates a metadata entry from a layer DiffID. +func (serv *V2MetadataService) Remove(metadata V2Metadata) error { + diffID, err := serv.GetDiffID(metadata.Digest) + if err != nil { + return err + } + oldMetadata, err := serv.GetMetadata(diffID) + if err != nil { + oldMetadata = nil + } + newMetadata := make([]V2Metadata, 0, len(oldMetadata)) + + // Copy all other metadata to new slice + for _, oldMeta := range oldMetadata { + if oldMeta != metadata { + newMetadata = append(newMetadata, oldMeta) + } + } + + if len(newMetadata) == 0 { + return serv.store.Delete(serv.diffIDNamespace(), serv.diffIDKey(diffID)) + } + + jsonBytes, err := json.Marshal(newMetadata) + if err != nil { + return err + } + + return serv.store.Set(serv.diffIDNamespace(), serv.diffIDKey(diffID), jsonBytes) +} diff --git a/vendor/github.com/docker/docker/distribution/pull.go b/vendor/github.com/docker/docker/distribution/pull.go new file mode 100644 index 00000000..4b42371b --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/pull.go @@ -0,0 +1,205 @@ +package distribution + +import ( + "fmt" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api" + "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/distribution/xfer" + "github.com/docker/docker/image" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "github.com/docker/engine-api/types" + "golang.org/x/net/context" +) + +// ImagePullConfig stores pull configuration. +type ImagePullConfig struct { + // MetaHeaders stores HTTP headers with metadata about the image + MetaHeaders map[string][]string + // AuthConfig holds authentication credentials for authenticating with + // the registry. + AuthConfig *types.AuthConfig + // ProgressOutput is the interface for showing the status of the pull + // operation. 
+	ProgressOutput progress.Output
+	// RegistryService is the registry service to use for TLS configuration
+	// and endpoint lookup.
+	RegistryService *registry.Service
+	// ImageEventLogger notifies events for a given image
+	ImageEventLogger func(id, name, action string)
+	// MetadataStore is the storage backend for distribution-specific
+	// metadata.
+	MetadataStore metadata.Store
+	// ImageStore manages images.
+	ImageStore image.Store
+	// ReferenceStore manages tags.
+	ReferenceStore reference.Store
+	// DownloadManager manages concurrent pulls.
+	DownloadManager *xfer.LayerDownloadManager
+}
+
+// Puller is an interface that abstracts pulling for different API versions.
+type Puller interface {
+	// Pull tries to pull the image referenced by `ref`. It returns an
+	// error; the caller may inspect that error (for example, checking for
+	// fallbackError) to decide whether to retry the pull on the next
+	// configured endpoint.
+	Pull(ctx context.Context, ref reference.Named) error
+}
+
+// newPuller returns a Puller interface that will pull from either a v1 or v2
+// registry. The endpoint argument contains a Version field that determines
+// whether a v1 or v2 puller will be created. The other parameters are passed
+// through to the underlying puller implementation for use during the actual
+// pull operation.
+func newPuller(endpoint registry.APIEndpoint, repoInfo *registry.RepositoryInfo, imagePullConfig *ImagePullConfig) (Puller, error) {
+	switch endpoint.Version {
+	case registry.APIVersion2:
+		return &v2Puller{
+			V2MetadataService: metadata.NewV2MetadataService(imagePullConfig.MetadataStore),
+			endpoint:          endpoint,
+			config:            imagePullConfig,
+			repoInfo:          repoInfo,
+		}, nil
+	case registry.APIVersion1:
+		return &v1Puller{
+			v1IDService: metadata.NewV1IDService(imagePullConfig.MetadataStore),
+			endpoint:    endpoint,
+			config:      imagePullConfig,
+			repoInfo:    repoInfo,
+		}, nil
+	}
+	return nil, fmt.Errorf("unknown version %d for registry %s", endpoint.Version, endpoint.URL)
+}
+
+// Pull initiates a pull operation for the image referenced by ref. The
+// reference may include a tag to pull a specific image, or be name-only to
+// pull all of the repository's tags.
+func Pull(ctx context.Context, ref reference.Named, imagePullConfig *ImagePullConfig) error {
+	// Resolve the Repository name from fqn to RepositoryInfo
+	repoInfo, err := imagePullConfig.RegistryService.ResolveRepository(ref)
+	if err != nil {
+		return err
+	}
+
+	// makes sure name is not empty or `scratch`
+	if err := validateRepoName(repoInfo.Name()); err != nil {
+		return err
+	}
+
+	endpoints, err := imagePullConfig.RegistryService.LookupPullEndpoints(repoInfo.Hostname())
+	if err != nil {
+		return err
+	}
+
+	var (
+		lastErr error
+
+		// discardNoSupportErrors is used to track whether an endpoint encountered an error of type ErrNoSupport.
+		// By default it is false, which means that if an ErrNoSupport error is encountered, it will be saved in lastErr.
+		// As soon as another kind of error is encountered, discardNoSupportErrors is set to true, avoiding the saving of
+		// any subsequent ErrNoSupport errors in lastErr.
+		// It's needed for pull-by-digest on v1 endpoints: if there are only v1 endpoints configured, the error should be
+		// returned and displayed, but if there was a v2 endpoint which supports pull-by-digest, then the last relevant
+		// error is the one from a v2 endpoint, not v1.
+		discardNoSupportErrors bool
+
+		// confirmedV2 is set to true if a pull attempt managed to
+		// confirm that it was talking to a v2 registry. This will
+		// prevent fallback to the v1 protocol.
+ confirmedV2 bool + + // confirmedTLSRegistries is a map indicating which registries + // are known to be using TLS. There should never be a plaintext + // retry for any of these. + confirmedTLSRegistries = make(map[string]struct{}) + ) + for _, endpoint := range endpoints { + if confirmedV2 && endpoint.Version == registry.APIVersion1 { + logrus.Debugf("Skipping v1 endpoint %s because v2 registry was detected", endpoint.URL) + continue + } + + if endpoint.URL.Scheme != "https" { + if _, confirmedTLS := confirmedTLSRegistries[endpoint.URL.Host]; confirmedTLS { + logrus.Debugf("Skipping non-TLS endpoint %s for host/port that appears to use TLS", endpoint.URL) + continue + } + } + + logrus.Debugf("Trying to pull %s from %s %s", repoInfo.Name(), endpoint.URL, endpoint.Version) + + puller, err := newPuller(endpoint, repoInfo, imagePullConfig) + if err != nil { + lastErr = err + continue + } + if err := puller.Pull(ctx, ref); err != nil { + // Was this pull cancelled? If so, don't try to fall + // back. + fallback := false + select { + case <-ctx.Done(): + default: + if fallbackErr, ok := err.(fallbackError); ok { + fallback = true + confirmedV2 = confirmedV2 || fallbackErr.confirmedV2 + if fallbackErr.transportOK && endpoint.URL.Scheme == "https" { + confirmedTLSRegistries[endpoint.URL.Host] = struct{}{} + } + err = fallbackErr.err + } + } + if fallback { + if _, ok := err.(ErrNoSupport); !ok { + // Because we found an error that's not ErrNoSupport, discard all subsequent ErrNoSupport errors. + discardNoSupportErrors = true + // append subsequent errors + lastErr = err + } else if !discardNoSupportErrors { + // Save the ErrNoSupport error, because it's either the first error or all encountered errors + // were also ErrNoSupport errors. + // append subsequent errors + lastErr = err + } + logrus.Errorf("Attempting next endpoint for pull after error: %v", err) + continue + } + logrus.Errorf("Not continuing with pull after error: %v", err) + return err + } + + imagePullConfig.ImageEventLogger(ref.String(), repoInfo.Name(), "pull") + return nil + } + + if lastErr == nil { + lastErr = fmt.Errorf("no endpoints found for %s", ref.String()) + } + + return lastErr +} + +// writeStatus writes a status message to out. If layersDownloaded is true, the +// status message indicates that a newer image was downloaded. Otherwise, it +// indicates that the image is up to date. requestedTag is the tag the message +// will refer to. +func writeStatus(requestedTag string, out progress.Output, layersDownloaded bool) { + if layersDownloaded { + progress.Message(out, "", "Status: Downloaded newer image for "+requestedTag) + } else { + progress.Message(out, "", "Status: Image is up to date for "+requestedTag) + } +} + +// validateRepoName validates the name of a repository. 
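+//
+// For example (editorial sketch, based on the checks below):
+//
+//	validateRepoName("")                // error: repository name can't be empty
+//	validateRepoName("scratch")         // error: reserved name (api.NoBaseImageSpecifier)
+//	validateRepoName("library/busybox") // nil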
+func validateRepoName(name string) error { + if name == "" { + return fmt.Errorf("Repository name can't be empty") + } + if name == api.NoBaseImageSpecifier { + return fmt.Errorf("'%s' is a reserved name", api.NoBaseImageSpecifier) + } + return nil +} diff --git a/vendor/github.com/docker/docker/distribution/pull_v1.go b/vendor/github.com/docker/docker/distribution/pull_v1.go new file mode 100644 index 00000000..86fad2ef --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/pull_v1.go @@ -0,0 +1,362 @@ +package distribution + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "net/url" + "os" + "strings" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/distribution/xfer" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/image" + "github.com/docker/docker/image/v1" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "golang.org/x/net/context" +) + +type v1Puller struct { + v1IDService *metadata.V1IDService + endpoint registry.APIEndpoint + config *ImagePullConfig + repoInfo *registry.RepositoryInfo + session *registry.Session +} + +func (p *v1Puller) Pull(ctx context.Context, ref reference.Named) error { + if _, isCanonical := ref.(reference.Canonical); isCanonical { + // Allowing fallback, because HTTPS v1 is before HTTP v2 + return fallbackError{err: ErrNoSupport{Err: errors.New("Cannot pull by digest with v1 registry")}} + } + + tlsConfig, err := p.config.RegistryService.TLSConfig(p.repoInfo.Index.Name) + if err != nil { + return err + } + // Adds Docker-specific headers as well as user-specified headers (metaHeaders) + tr := transport.NewTransport( + // TODO(tiborvass): was ReceiveTimeout + registry.NewTransport(tlsConfig), + registry.DockerHeaders(dockerversion.DockerUserAgent(ctx), p.config.MetaHeaders)..., + ) + client := registry.HTTPClient(tr) + v1Endpoint, err := p.endpoint.ToV1Endpoint(dockerversion.DockerUserAgent(ctx), p.config.MetaHeaders) + if err != nil { + logrus.Debugf("Could not get v1 endpoint: %v", err) + return fallbackError{err: err} + } + p.session, err = registry.NewSession(client, p.config.AuthConfig, v1Endpoint) + if err != nil { + // TODO(dmcgowan): Check if should fallback + logrus.Debugf("Fallback from error: %s", err) + return fallbackError{err: err} + } + if err := p.pullRepository(ctx, ref); err != nil { + // TODO(dmcgowan): Check if should fallback + return err + } + progress.Message(p.config.ProgressOutput, "", p.repoInfo.FullName()+": this image was pulled from a legacy registry. 
Important: This registry version will not be supported in future versions of docker.") + + return nil +} + +func (p *v1Puller) pullRepository(ctx context.Context, ref reference.Named) error { + progress.Message(p.config.ProgressOutput, "", "Pulling repository "+p.repoInfo.FullName()) + + repoData, err := p.session.GetRepositoryData(p.repoInfo) + if err != nil { + if strings.Contains(err.Error(), "HTTP code: 404") { + return fmt.Errorf("Error: image %s not found", p.repoInfo.RemoteName()) + } + // Unexpected HTTP error + return err + } + + logrus.Debugf("Retrieving the tag list") + var tagsList map[string]string + tagged, isTagged := ref.(reference.NamedTagged) + if !isTagged { + tagsList, err = p.session.GetRemoteTags(repoData.Endpoints, p.repoInfo) + } else { + var tagID string + tagsList = make(map[string]string) + tagID, err = p.session.GetRemoteTag(repoData.Endpoints, p.repoInfo, tagged.Tag()) + if err == registry.ErrRepoNotFound { + return fmt.Errorf("Tag %s not found in repository %s", tagged.Tag(), p.repoInfo.FullName()) + } + tagsList[tagged.Tag()] = tagID + } + if err != nil { + logrus.Errorf("unable to get remote tags: %s", err) + return err + } + + for tag, id := range tagsList { + repoData.ImgList[id] = ®istry.ImgData{ + ID: id, + Tag: tag, + Checksum: "", + } + } + + layersDownloaded := false + for _, imgData := range repoData.ImgList { + if isTagged && imgData.Tag != tagged.Tag() { + continue + } + + err := p.downloadImage(ctx, repoData, imgData, &layersDownloaded) + if err != nil { + return err + } + } + + writeStatus(ref.String(), p.config.ProgressOutput, layersDownloaded) + return nil +} + +func (p *v1Puller) downloadImage(ctx context.Context, repoData *registry.RepositoryData, img *registry.ImgData, layersDownloaded *bool) error { + if img.Tag == "" { + logrus.Debugf("Image (id: %s) present in this repository but untagged, skipping", img.ID) + return nil + } + + localNameRef, err := reference.WithTag(p.repoInfo, img.Tag) + if err != nil { + retErr := fmt.Errorf("Image (id: %s) has invalid tag: %s", img.ID, img.Tag) + logrus.Debug(retErr.Error()) + return retErr + } + + if err := v1.ValidateID(img.ID); err != nil { + return err + } + + progress.Updatef(p.config.ProgressOutput, stringid.TruncateID(img.ID), "Pulling image (%s) from %s", img.Tag, p.repoInfo.FullName()) + success := false + var lastErr error + for _, ep := range p.repoInfo.Index.Mirrors { + ep += "v1/" + progress.Updatef(p.config.ProgressOutput, stringid.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s, mirror: %s", img.Tag, p.repoInfo.FullName(), ep)) + if err = p.pullImage(ctx, img.ID, ep, localNameRef, layersDownloaded); err != nil { + // Don't report errors when pulling from mirrors. + logrus.Debugf("Error pulling image (%s) from %s, mirror: %s, %s", img.Tag, p.repoInfo.FullName(), ep, err) + continue + } + success = true + break + } + if !success { + for _, ep := range repoData.Endpoints { + progress.Updatef(p.config.ProgressOutput, stringid.TruncateID(img.ID), "Pulling image (%s) from %s, endpoint: %s", img.Tag, p.repoInfo.FullName(), ep) + if err = p.pullImage(ctx, img.ID, ep, localNameRef, layersDownloaded); err != nil { + // It's not ideal that only the last error is returned, it would be better to concatenate the errors. + // As the error is also given to the output stream the user will see the error. 
+ lastErr = err + progress.Updatef(p.config.ProgressOutput, stringid.TruncateID(img.ID), "Error pulling image (%s) from %s, endpoint: %s, %s", img.Tag, p.repoInfo.FullName(), ep, err) + continue + } + success = true + break + } + } + if !success { + err := fmt.Errorf("Error pulling image (%s) from %s, %v", img.Tag, p.repoInfo.FullName(), lastErr) + progress.Update(p.config.ProgressOutput, stringid.TruncateID(img.ID), err.Error()) + return err + } + return nil +} + +func (p *v1Puller) pullImage(ctx context.Context, v1ID, endpoint string, localNameRef reference.Named, layersDownloaded *bool) (err error) { + var history []string + history, err = p.session.GetRemoteHistory(v1ID, endpoint) + if err != nil { + return err + } + if len(history) < 1 { + return fmt.Errorf("empty history for image %s", v1ID) + } + progress.Update(p.config.ProgressOutput, stringid.TruncateID(v1ID), "Pulling dependent layers") + + var ( + descriptors []xfer.DownloadDescriptor + newHistory []image.History + imgJSON []byte + imgSize int64 + ) + + // Iterate over layers, in order from bottom-most to top-most. Download + // config for all layers and create descriptors. + for i := len(history) - 1; i >= 0; i-- { + v1LayerID := history[i] + imgJSON, imgSize, err = p.downloadLayerConfig(v1LayerID, endpoint) + if err != nil { + return err + } + + // Create a new-style config from the legacy configs + h, err := v1.HistoryFromConfig(imgJSON, false) + if err != nil { + return err + } + newHistory = append(newHistory, h) + + layerDescriptor := &v1LayerDescriptor{ + v1LayerID: v1LayerID, + indexName: p.repoInfo.Index.Name, + endpoint: endpoint, + v1IDService: p.v1IDService, + layersDownloaded: layersDownloaded, + layerSize: imgSize, + session: p.session, + } + + descriptors = append(descriptors, layerDescriptor) + } + + rootFS := image.NewRootFS() + resultRootFS, release, err := p.config.DownloadManager.Download(ctx, *rootFS, descriptors, p.config.ProgressOutput) + if err != nil { + return err + } + defer release() + + config, err := v1.MakeConfigFromV1Config(imgJSON, &resultRootFS, newHistory) + if err != nil { + return err + } + + imageID, err := p.config.ImageStore.Create(config) + if err != nil { + return err + } + + if err := p.config.ReferenceStore.AddTag(localNameRef, imageID, true); err != nil { + return err + } + + return nil +} + +func (p *v1Puller) downloadLayerConfig(v1LayerID, endpoint string) (imgJSON []byte, imgSize int64, err error) { + progress.Update(p.config.ProgressOutput, stringid.TruncateID(v1LayerID), "Pulling metadata") + + retries := 5 + for j := 1; j <= retries; j++ { + imgJSON, imgSize, err := p.session.GetRemoteImageJSON(v1LayerID, endpoint) + if err != nil && j == retries { + progress.Update(p.config.ProgressOutput, stringid.TruncateID(v1LayerID), "Error pulling layer metadata") + return nil, 0, err + } else if err != nil { + time.Sleep(time.Duration(j) * 500 * time.Millisecond) + continue + } + + return imgJSON, imgSize, nil + } + + // not reached + return nil, 0, nil +} + +type v1LayerDescriptor struct { + v1LayerID string + indexName string + endpoint string + v1IDService *metadata.V1IDService + layersDownloaded *bool + layerSize int64 + session *registry.Session + tmpFile *os.File +} + +func (ld *v1LayerDescriptor) Key() string { + return "v1:" + ld.v1LayerID +} + +func (ld *v1LayerDescriptor) ID() string { + return stringid.TruncateID(ld.v1LayerID) +} + +func (ld *v1LayerDescriptor) DiffID() (layer.DiffID, error) { + return ld.v1IDService.Get(ld.v1LayerID, ld.indexName) +} + +func (ld 
*v1LayerDescriptor) Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error) { + progress.Update(progressOutput, ld.ID(), "Pulling fs layer") + layerReader, err := ld.session.GetRemoteImageLayer(ld.v1LayerID, ld.endpoint, ld.layerSize) + if err != nil { + progress.Update(progressOutput, ld.ID(), "Error pulling dependent layers") + if uerr, ok := err.(*url.Error); ok { + err = uerr.Err + } + if terr, ok := err.(net.Error); ok && terr.Timeout() { + return nil, 0, err + } + return nil, 0, xfer.DoNotRetry{Err: err} + } + *ld.layersDownloaded = true + + ld.tmpFile, err = ioutil.TempFile("", "GetImageBlob") + if err != nil { + layerReader.Close() + return nil, 0, err + } + + reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, layerReader), progressOutput, ld.layerSize, ld.ID(), "Downloading") + defer reader.Close() + + _, err = io.Copy(ld.tmpFile, reader) + if err != nil { + ld.Close() + return nil, 0, err + } + + progress.Update(progressOutput, ld.ID(), "Download complete") + + logrus.Debugf("Downloaded %s to tempfile %s", ld.ID(), ld.tmpFile.Name()) + + ld.tmpFile.Seek(0, 0) + + // hand off the temporary file to the download manager, so it will only + // be closed once + tmpFile := ld.tmpFile + ld.tmpFile = nil + + return ioutils.NewReadCloserWrapper(tmpFile, func() error { + tmpFile.Close() + err := os.RemoveAll(tmpFile.Name()) + if err != nil { + logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name()) + } + return err + }), ld.layerSize, nil +} + +func (ld *v1LayerDescriptor) Close() { + if ld.tmpFile != nil { + ld.tmpFile.Close() + if err := os.RemoveAll(ld.tmpFile.Name()); err != nil { + logrus.Errorf("Failed to remove temp file: %s", ld.tmpFile.Name()) + } + ld.tmpFile = nil + } +} + +func (ld *v1LayerDescriptor) Registered(diffID layer.DiffID) { + // Cache mapping from this layer's DiffID to the blobsum + ld.v1IDService.Set(ld.v1LayerID, ld.indexName, diffID) +} diff --git a/vendor/github.com/docker/docker/distribution/pull_v2.go b/vendor/github.com/docker/docker/distribution/pull_v2.go new file mode 100644 index 00000000..748ef422 --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/pull_v2.go @@ -0,0 +1,840 @@ +package distribution + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net/url" + "os" + "runtime" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest/manifestlist" + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/manifest/schema2" + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/distribution/registry/client/auth" + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/distribution/xfer" + "github.com/docker/docker/image" + "github.com/docker/docker/image/v1" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "golang.org/x/net/context" +) + +var errRootFSMismatch = errors.New("layers from manifest don't match image configuration") + +// ImageConfigPullError is an error pulling the image config blob +// (only applies to schema2). +type ImageConfigPullError struct { + Err error +} + +// Error returns the error string for ImageConfigPullError. 
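+//
+// Illustrative output (editorial note, not upstream):
+//
+//	ImageConfigPullError{Err: io.ErrUnexpectedEOF}.Error()
+//	// "error pulling image configuration: unexpected EOF"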
+func (e ImageConfigPullError) Error() string { + return "error pulling image configuration: " + e.Err.Error() +} + +type v2Puller struct { + V2MetadataService *metadata.V2MetadataService + endpoint registry.APIEndpoint + config *ImagePullConfig + repoInfo *registry.RepositoryInfo + repo distribution.Repository + // confirmedV2 is set to true if we confirm we're talking to a v2 + // registry. This is used to limit fallbacks to the v1 protocol. + confirmedV2 bool +} + +func (p *v2Puller) Pull(ctx context.Context, ref reference.Named) (err error) { + // TODO(tiborvass): was ReceiveTimeout + p.repo, p.confirmedV2, err = NewV2Repository(ctx, p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "pull") + if err != nil { + logrus.Warnf("Error getting v2 registry: %v", err) + return err + } + + if err = p.pullV2Repository(ctx, ref); err != nil { + if _, ok := err.(fallbackError); ok { + return err + } + if continueOnError(err) { + logrus.Errorf("Error trying v2 registry: %v", err) + return fallbackError{ + err: err, + confirmedV2: p.confirmedV2, + transportOK: true, + } + } + } + return err +} + +func (p *v2Puller) pullV2Repository(ctx context.Context, ref reference.Named) (err error) { + var layersDownloaded bool + if !reference.IsNameOnly(ref) { + layersDownloaded, err = p.pullV2Tag(ctx, ref) + if err != nil { + return err + } + } else { + tags, err := p.repo.Tags(ctx).All(ctx) + if err != nil { + // If this repository doesn't exist on V2, we should + // permit a fallback to V1. + return allowV1Fallback(err) + } + + // The v2 registry knows about this repository, so we will not + // allow fallback to the v1 protocol even if we encounter an + // error later on. + p.confirmedV2 = true + + for _, tag := range tags { + tagRef, err := reference.WithTag(ref, tag) + if err != nil { + return err + } + pulledNew, err := p.pullV2Tag(ctx, tagRef) + if err != nil { + // Since this is the pull-all-tags case, don't + // allow an error pulling a particular tag to + // make the whole pull fall back to v1. + if fallbackErr, ok := err.(fallbackError); ok { + return fallbackErr.err + } + return err + } + // pulledNew is true if either new layers were downloaded OR if existing images were newly tagged + // TODO(tiborvass): should we change the name of `layersDownload`? What about message in WriteStatus? 
+ layersDownloaded = layersDownloaded || pulledNew + } + } + + writeStatus(ref.String(), p.config.ProgressOutput, layersDownloaded) + + return nil +} + +type v2LayerDescriptor struct { + digest digest.Digest + repoInfo *registry.RepositoryInfo + repo distribution.Repository + V2MetadataService *metadata.V2MetadataService + tmpFile *os.File + verifier digest.Verifier +} + +func (ld *v2LayerDescriptor) Key() string { + return "v2:" + ld.digest.String() +} + +func (ld *v2LayerDescriptor) ID() string { + return stringid.TruncateID(ld.digest.String()) +} + +func (ld *v2LayerDescriptor) DiffID() (layer.DiffID, error) { + return ld.V2MetadataService.GetDiffID(ld.digest) +} + +func (ld *v2LayerDescriptor) Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error) { + logrus.Debugf("pulling blob %q", ld.digest) + + var ( + err error + offset int64 + ) + + if ld.tmpFile == nil { + ld.tmpFile, err = createDownloadFile() + if err != nil { + return nil, 0, xfer.DoNotRetry{Err: err} + } + } else { + offset, err = ld.tmpFile.Seek(0, os.SEEK_END) + if err != nil { + logrus.Debugf("error seeking to end of download file: %v", err) + offset = 0 + + ld.tmpFile.Close() + if err := os.Remove(ld.tmpFile.Name()); err != nil { + logrus.Errorf("Failed to remove temp file: %s", ld.tmpFile.Name()) + } + ld.tmpFile, err = createDownloadFile() + if err != nil { + return nil, 0, xfer.DoNotRetry{Err: err} + } + } else if offset != 0 { + logrus.Debugf("attempting to resume download of %q from %d bytes", ld.digest, offset) + } + } + + tmpFile := ld.tmpFile + blobs := ld.repo.Blobs(ctx) + + layerDownload, err := blobs.Open(ctx, ld.digest) + if err != nil { + logrus.Errorf("Error initiating layer download: %v", err) + if err == distribution.ErrBlobUnknown { + return nil, 0, xfer.DoNotRetry{Err: err} + } + return nil, 0, retryOnError(err) + } + + if offset != 0 { + _, err := layerDownload.Seek(offset, os.SEEK_SET) + if err != nil { + if err := ld.truncateDownloadFile(); err != nil { + return nil, 0, xfer.DoNotRetry{Err: err} + } + return nil, 0, err + } + } + size, err := layerDownload.Seek(0, os.SEEK_END) + if err != nil { + // Seek failed, perhaps because there was no Content-Length + // header. This shouldn't fail the download, because we can + // still continue without a progress bar. + size = 0 + } else { + if size != 0 && offset > size { + logrus.Debugf("Partial download is larger than full blob. Starting over") + offset = 0 + if err := ld.truncateDownloadFile(); err != nil { + return nil, 0, xfer.DoNotRetry{Err: err} + } + } + + // Restore the seek offset either at the beginning of the + // stream, or just after the last byte we have from previous + // attempts. 
+ _, err = layerDownload.Seek(offset, os.SEEK_SET) + if err != nil { + return nil, 0, err + } + } + + reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, layerDownload), progressOutput, size-offset, ld.ID(), "Downloading") + defer reader.Close() + + if ld.verifier == nil { + ld.verifier, err = digest.NewDigestVerifier(ld.digest) + if err != nil { + return nil, 0, xfer.DoNotRetry{Err: err} + } + } + + _, err = io.Copy(tmpFile, io.TeeReader(reader, ld.verifier)) + if err != nil { + if err == transport.ErrWrongCodeForByteRange { + if err := ld.truncateDownloadFile(); err != nil { + return nil, 0, xfer.DoNotRetry{Err: err} + } + return nil, 0, err + } + return nil, 0, retryOnError(err) + } + + progress.Update(progressOutput, ld.ID(), "Verifying Checksum") + + if !ld.verifier.Verified() { + err = fmt.Errorf("filesystem layer verification failed for digest %s", ld.digest) + logrus.Error(err) + + // Allow a retry if this digest verification error happened + // after a resumed download. + if offset != 0 { + if err := ld.truncateDownloadFile(); err != nil { + return nil, 0, xfer.DoNotRetry{Err: err} + } + + return nil, 0, err + } + return nil, 0, xfer.DoNotRetry{Err: err} + } + + progress.Update(progressOutput, ld.ID(), "Download complete") + + logrus.Debugf("Downloaded %s to tempfile %s", ld.ID(), tmpFile.Name()) + + _, err = tmpFile.Seek(0, os.SEEK_SET) + if err != nil { + tmpFile.Close() + if err := os.Remove(tmpFile.Name()); err != nil { + logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name()) + } + ld.tmpFile = nil + ld.verifier = nil + return nil, 0, xfer.DoNotRetry{Err: err} + } + + // hand off the temporary file to the download manager, so it will only + // be closed once + ld.tmpFile = nil + + return ioutils.NewReadCloserWrapper(tmpFile, func() error { + tmpFile.Close() + err := os.RemoveAll(tmpFile.Name()) + if err != nil { + logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name()) + } + return err + }), size, nil +} + +func (ld *v2LayerDescriptor) Close() { + if ld.tmpFile != nil { + ld.tmpFile.Close() + if err := os.RemoveAll(ld.tmpFile.Name()); err != nil { + logrus.Errorf("Failed to remove temp file: %s", ld.tmpFile.Name()) + } + } +} + +func (ld *v2LayerDescriptor) truncateDownloadFile() error { + // Need a new hash context since we will be redoing the download + ld.verifier = nil + + if _, err := ld.tmpFile.Seek(0, os.SEEK_SET); err != nil { + logrus.Errorf("error seeking to beginning of download file: %v", err) + return err + } + + if err := ld.tmpFile.Truncate(0); err != nil { + logrus.Errorf("error truncating download file: %v", err) + return err + } + + return nil +} + +func (ld *v2LayerDescriptor) Registered(diffID layer.DiffID) { + // Cache mapping from this layer's DiffID to the blobsum + ld.V2MetadataService.Add(diffID, metadata.V2Metadata{Digest: ld.digest, SourceRepository: ld.repoInfo.FullName()}) +} + +func (p *v2Puller) pullV2Tag(ctx context.Context, ref reference.Named) (tagUpdated bool, err error) { + manSvc, err := p.repo.Manifests(ctx) + if err != nil { + return false, err + } + + var ( + manifest distribution.Manifest + tagOrDigest string // Used for logging/progress only + ) + if tagged, isTagged := ref.(reference.NamedTagged); isTagged { + // NOTE: not using TagService.Get, since it uses HEAD requests + // against the manifests endpoint, which are not supported by + // all registry versions. 
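+		// Illustrative difference (v2 registry API request shapes):
+		//   TagService.Get         -> HEAD /v2/<name>/manifests/<tag>
+		//   manSvc.Get(WithTag(t)) -> GET  /v2/<name>/manifests/<tag>
+		// Older registries reject the HEAD form, hence the GET below.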
+ manifest, err = manSvc.Get(ctx, "", distribution.WithTag(tagged.Tag())) + if err != nil { + return false, allowV1Fallback(err) + } + tagOrDigest = tagged.Tag() + } else if digested, isDigested := ref.(reference.Canonical); isDigested { + manifest, err = manSvc.Get(ctx, digested.Digest()) + if err != nil { + return false, err + } + tagOrDigest = digested.Digest().String() + } else { + return false, fmt.Errorf("internal error: reference has neither a tag nor a digest: %s", ref.String()) + } + + if manifest == nil { + return false, fmt.Errorf("image manifest does not exist for tag or digest %q", tagOrDigest) + } + + // If manSvc.Get succeeded, we can be confident that the registry on + // the other side speaks the v2 protocol. + p.confirmedV2 = true + + logrus.Debugf("Pulling ref from V2 registry: %s", ref.String()) + progress.Message(p.config.ProgressOutput, tagOrDigest, "Pulling from "+p.repo.Named().Name()) + + var ( + imageID image.ID + manifestDigest digest.Digest + ) + + switch v := manifest.(type) { + case *schema1.SignedManifest: + imageID, manifestDigest, err = p.pullSchema1(ctx, ref, v) + if err != nil { + return false, err + } + case *schema2.DeserializedManifest: + imageID, manifestDigest, err = p.pullSchema2(ctx, ref, v) + if err != nil { + return false, err + } + case *manifestlist.DeserializedManifestList: + imageID, manifestDigest, err = p.pullManifestList(ctx, ref, v) + if err != nil { + return false, err + } + default: + return false, errors.New("unsupported manifest format") + } + + progress.Message(p.config.ProgressOutput, "", "Digest: "+manifestDigest.String()) + + oldTagImageID, err := p.config.ReferenceStore.Get(ref) + if err == nil { + if oldTagImageID == imageID { + return false, nil + } + } else if err != reference.ErrDoesNotExist { + return false, err + } + + if canonical, ok := ref.(reference.Canonical); ok { + if err = p.config.ReferenceStore.AddDigest(canonical, imageID, true); err != nil { + return false, err + } + } else if err = p.config.ReferenceStore.AddTag(ref, imageID, true); err != nil { + return false, err + } + + return true, nil +} + +func (p *v2Puller) pullSchema1(ctx context.Context, ref reference.Named, unverifiedManifest *schema1.SignedManifest) (imageID image.ID, manifestDigest digest.Digest, err error) { + var verifiedManifest *schema1.Manifest + verifiedManifest, err = verifySchema1Manifest(unverifiedManifest, ref) + if err != nil { + return "", "", err + } + + rootFS := image.NewRootFS() + + if err := detectBaseLayer(p.config.ImageStore, verifiedManifest, rootFS); err != nil { + return "", "", err + } + + // remove duplicate layers and check parent chain validity + err = fixManifestLayers(verifiedManifest) + if err != nil { + return "", "", err + } + + var descriptors []xfer.DownloadDescriptor + + // Image history converted to the new format + var history []image.History + + // Note that the order of this loop is in the direction of bottom-most + // to top-most, so that the downloads slice gets ordered correctly. 
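+	// Illustrative: schema1 FSLayers are ordered top-most first, e.g.
+	// [app, libs, base]; iterating i = len-1 .. 0 appends descriptors as
+	// [base, libs, app], the order the download manager expects.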
+ for i := len(verifiedManifest.FSLayers) - 1; i >= 0; i-- { + blobSum := verifiedManifest.FSLayers[i].BlobSum + + var throwAway struct { + ThrowAway bool `json:"throwaway,omitempty"` + } + if err := json.Unmarshal([]byte(verifiedManifest.History[i].V1Compatibility), &throwAway); err != nil { + return "", "", err + } + + h, err := v1.HistoryFromConfig([]byte(verifiedManifest.History[i].V1Compatibility), throwAway.ThrowAway) + if err != nil { + return "", "", err + } + history = append(history, h) + + if throwAway.ThrowAway { + continue + } + + layerDescriptor := &v2LayerDescriptor{ + digest: blobSum, + repoInfo: p.repoInfo, + repo: p.repo, + V2MetadataService: p.V2MetadataService, + } + + descriptors = append(descriptors, layerDescriptor) + } + + resultRootFS, release, err := p.config.DownloadManager.Download(ctx, *rootFS, descriptors, p.config.ProgressOutput) + if err != nil { + return "", "", err + } + defer release() + + config, err := v1.MakeConfigFromV1Config([]byte(verifiedManifest.History[0].V1Compatibility), &resultRootFS, history) + if err != nil { + return "", "", err + } + + imageID, err = p.config.ImageStore.Create(config) + if err != nil { + return "", "", err + } + + manifestDigest = digest.FromBytes(unverifiedManifest.Canonical) + + return imageID, manifestDigest, nil +} + +func (p *v2Puller) pullSchema2(ctx context.Context, ref reference.Named, mfst *schema2.DeserializedManifest) (imageID image.ID, manifestDigest digest.Digest, err error) { + manifestDigest, err = schema2ManifestDigest(ref, mfst) + if err != nil { + return "", "", err + } + + target := mfst.Target() + imageID = image.ID(target.Digest) + if _, err := p.config.ImageStore.Get(imageID); err == nil { + // If the image already exists locally, no need to pull + // anything. + return imageID, manifestDigest, nil + } + + configChan := make(chan []byte, 1) + errChan := make(chan error, 1) + var cancel func() + ctx, cancel = context.WithCancel(ctx) + + // Pull the image config + go func() { + configJSON, err := p.pullSchema2ImageConfig(ctx, target.Digest) + if err != nil { + errChan <- ImageConfigPullError{Err: err} + cancel() + return + } + configChan <- configJSON + }() + + var descriptors []xfer.DownloadDescriptor + + // Note that the order of this loop is in the direction of bottom-most + // to top-most, so that the downloads slice gets ordered correctly. + for _, d := range mfst.References() { + layerDescriptor := &v2LayerDescriptor{ + digest: d.Digest, + repo: p.repo, + repoInfo: p.repoInfo, + V2MetadataService: p.V2MetadataService, + } + + descriptors = append(descriptors, layerDescriptor) + } + + var ( + configJSON []byte // raw serialized image config + unmarshalledConfig image.Image // deserialized image config + downloadRootFS image.RootFS // rootFS to use for registering layers. 
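+		// The three variables above are filled in by the config goroutine
+		// and the layer downloads below; on Windows the config must arrive
+		// first, since its RootFS seeds downloadRootFS, while elsewhere an
+		// empty RootFS is used and DiffIDs accumulate during download.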
+	)
+	if runtime.GOOS == "windows" {
+		configJSON, unmarshalledConfig, err = receiveConfig(configChan, errChan)
+		if err != nil {
+			return "", "", err
+		}
+		if unmarshalledConfig.RootFS == nil {
+			return "", "", errors.New("image config has no rootfs section")
+		}
+		downloadRootFS = *unmarshalledConfig.RootFS
+		downloadRootFS.DiffIDs = []layer.DiffID{}
+	} else {
+		downloadRootFS = *image.NewRootFS()
+	}
+
+	rootFS, release, err := p.config.DownloadManager.Download(ctx, downloadRootFS, descriptors, p.config.ProgressOutput)
+	if err != nil {
+		if configJSON != nil {
+			// Already received the config
+			return "", "", err
+		}
+		select {
+		case err = <-errChan:
+			return "", "", err
+		default:
+			cancel()
+			select {
+			case <-configChan:
+			case <-errChan:
+			}
+			return "", "", err
+		}
+	}
+	defer release()
+
+	if configJSON == nil {
+		configJSON, unmarshalledConfig, err = receiveConfig(configChan, errChan)
+		if err != nil {
+			return "", "", err
+		}
+	}
+
+	// The DiffIDs returned in rootFS MUST match those in the config.
+	// Otherwise the image config could be referencing layers that aren't
+	// included in the manifest.
+	if len(rootFS.DiffIDs) != len(unmarshalledConfig.RootFS.DiffIDs) {
+		return "", "", errRootFSMismatch
+	}
+
+	for i := range rootFS.DiffIDs {
+		if rootFS.DiffIDs[i] != unmarshalledConfig.RootFS.DiffIDs[i] {
+			return "", "", errRootFSMismatch
+		}
+	}
+
+	imageID, err = p.config.ImageStore.Create(configJSON)
+	if err != nil {
+		return "", "", err
+	}
+
+	return imageID, manifestDigest, nil
+}
+
+func receiveConfig(configChan <-chan []byte, errChan <-chan error) ([]byte, image.Image, error) {
+	select {
+	case configJSON := <-configChan:
+		var unmarshalledConfig image.Image
+		if err := json.Unmarshal(configJSON, &unmarshalledConfig); err != nil {
+			return nil, image.Image{}, err
+		}
+		return configJSON, unmarshalledConfig, nil
+	case err := <-errChan:
+		return nil, image.Image{}, err
+		// Don't need a case for ctx.Done in the select because cancellation
+		// will trigger an error in p.pullSchema2ImageConfig.
+	}
+}
+
+// pullManifestList handles "manifest lists" which point to various
+// platform-specific manifests.
+func (p *v2Puller) pullManifestList(ctx context.Context, ref reference.Named, mfstList *manifestlist.DeserializedManifestList) (imageID image.ID, manifestListDigest digest.Digest, err error) {
+	manifestListDigest, err = schema2ManifestDigest(ref, mfstList)
+	if err != nil {
+		return "", "", err
+	}
+
+	var manifestDigest digest.Digest
+	for _, manifestDescriptor := range mfstList.Manifests {
+		// TODO(aaronl): The manifest list spec supports optional
+		// "features" and "variant" fields. These are not yet used.
+		// Once they are, their values should be interpreted here.
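+		// Illustrative match: on a linux/amd64 daemon, a descriptor whose
+		// platform is {OS: "linux", Architecture: "amd64"} is selected and
+		// any other entries (say windows/amd64 or linux/arm64) are skipped.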
+		if manifestDescriptor.Platform.Architecture == runtime.GOARCH && manifestDescriptor.Platform.OS == runtime.GOOS {
+			manifestDigest = manifestDescriptor.Digest
+			break
+		}
+	}
+
+	if manifestDigest == "" {
+		return "", "", errors.New("no supported platform found in manifest list")
+	}
+
+	manSvc, err := p.repo.Manifests(ctx)
+	if err != nil {
+		return "", "", err
+	}
+
+	manifest, err := manSvc.Get(ctx, manifestDigest)
+	if err != nil {
+		return "", "", err
+	}
+
+	manifestRef, err := reference.WithDigest(ref, manifestDigest)
+	if err != nil {
+		return "", "", err
+	}
+
+	switch v := manifest.(type) {
+	case *schema1.SignedManifest:
+		imageID, _, err = p.pullSchema1(ctx, manifestRef, v)
+		if err != nil {
+			return "", "", err
+		}
+	case *schema2.DeserializedManifest:
+		imageID, _, err = p.pullSchema2(ctx, manifestRef, v)
+		if err != nil {
+			return "", "", err
+		}
+	default:
+		return "", "", errors.New("unsupported manifest format")
+	}
+
+	return imageID, manifestListDigest, err
+}
+
+func (p *v2Puller) pullSchema2ImageConfig(ctx context.Context, dgst digest.Digest) (configJSON []byte, err error) {
+	blobs := p.repo.Blobs(ctx)
+	configJSON, err = blobs.Get(ctx, dgst)
+	if err != nil {
+		return nil, err
+	}
+
+	// Verify image config digest
+	verifier, err := digest.NewDigestVerifier(dgst)
+	if err != nil {
+		return nil, err
+	}
+	if _, err := verifier.Write(configJSON); err != nil {
+		return nil, err
+	}
+	if !verifier.Verified() {
+		err := fmt.Errorf("image config verification failed for digest %s", dgst)
+		logrus.Error(err)
+		return nil, err
+	}
+
+	return configJSON, nil
+}
+
+// schema2ManifestDigest computes the manifest digest, and, if pulling by
+// digest, ensures that it matches the requested digest.
+func schema2ManifestDigest(ref reference.Named, mfst distribution.Manifest) (digest.Digest, error) {
+	_, canonical, err := mfst.Payload()
+	if err != nil {
+		return "", err
+	}
+
+	// If pull by digest, then verify the manifest digest.
+	if digested, isDigested := ref.(reference.Canonical); isDigested {
+		verifier, err := digest.NewDigestVerifier(digested.Digest())
+		if err != nil {
+			return "", err
+		}
+		if _, err := verifier.Write(canonical); err != nil {
+			return "", err
+		}
+		if !verifier.Verified() {
+			err := fmt.Errorf("manifest verification failed for digest %s", digested.Digest())
+			logrus.Error(err)
+			return "", err
+		}
+		return digested.Digest(), nil
+	}
+
+	return digest.FromBytes(canonical), nil
+}
+
+// allowV1Fallback checks if the error is a possible reason to fall back to
+// v1 (even if confirmedV2 has been set already), and if so, wraps the error
+// in a fallbackError with confirmedV2 set to false. Otherwise, it returns the
+// error unmodified.
+func allowV1Fallback(err error) error {
+	switch v := err.(type) {
+	case errcode.Errors:
+		if len(v) != 0 {
+			if v0, ok := v[0].(errcode.Error); ok && shouldV2Fallback(v0) {
+				return fallbackError{
+					err:         err,
+					confirmedV2: false,
+					transportOK: true,
+				}
+			}
+		}
+	case errcode.Error:
+		if shouldV2Fallback(v) {
+			return fallbackError{
+				err:         err,
+				confirmedV2: false,
+				transportOK: true,
+			}
+		}
+	case *url.Error:
+		if v.Err == auth.ErrNoBasicAuthCredentials {
+			return fallbackError{err: err, confirmedV2: false}
+		}
+	}
+
+	return err
+}
+
+func verifySchema1Manifest(signedManifest *schema1.SignedManifest, ref reference.Named) (m *schema1.Manifest, err error) {
+	// If pull by digest, then verify the manifest digest. NOTE: It is
+	// important to do this first, before any other content validation.
If the + // digest cannot be verified, don't even bother with those other things. + if digested, isCanonical := ref.(reference.Canonical); isCanonical { + verifier, err := digest.NewDigestVerifier(digested.Digest()) + if err != nil { + return nil, err + } + if _, err := verifier.Write(signedManifest.Canonical); err != nil { + return nil, err + } + if !verifier.Verified() { + err := fmt.Errorf("image verification failed for digest %s", digested.Digest()) + logrus.Error(err) + return nil, err + } + } + m = &signedManifest.Manifest + + if m.SchemaVersion != 1 { + return nil, fmt.Errorf("unsupported schema version %d for %q", m.SchemaVersion, ref.String()) + } + if len(m.FSLayers) != len(m.History) { + return nil, fmt.Errorf("length of history not equal to number of layers for %q", ref.String()) + } + if len(m.FSLayers) == 0 { + return nil, fmt.Errorf("no FSLayers in manifest for %q", ref.String()) + } + return m, nil +} + +// fixManifestLayers removes repeated layers from the manifest and checks the +// correctness of the parent chain. +func fixManifestLayers(m *schema1.Manifest) error { + imgs := make([]*image.V1Image, len(m.FSLayers)) + for i := range m.FSLayers { + img := &image.V1Image{} + + if err := json.Unmarshal([]byte(m.History[i].V1Compatibility), img); err != nil { + return err + } + + imgs[i] = img + if err := v1.ValidateID(img.ID); err != nil { + return err + } + } + + if imgs[len(imgs)-1].Parent != "" && runtime.GOOS != "windows" { + // Windows base layer can point to a base layer parent that is not in manifest. + return errors.New("Invalid parent ID in the base layer of the image.") + } + + // check general duplicates to error instead of a deadlock + idmap := make(map[string]struct{}) + + var lastID string + for _, img := range imgs { + // skip IDs that appear after each other, we handle those later + if _, exists := idmap[img.ID]; img.ID != lastID && exists { + return fmt.Errorf("ID %+v appears multiple times in manifest", img.ID) + } + lastID = img.ID + idmap[lastID] = struct{}{} + } + + // backwards loop so that we keep the remaining indexes after removing items + for i := len(imgs) - 2; i >= 0; i-- { + if imgs[i].ID == imgs[i+1].ID { // repeated ID. remove and continue + m.FSLayers = append(m.FSLayers[:i], m.FSLayers[i+1:]...) + m.History = append(m.History[:i], m.History[i+1:]...) + } else if imgs[i].Parent != imgs[i+1].ID { + return fmt.Errorf("Invalid parent ID. 
Expected %v, got %v.", imgs[i+1].ID, imgs[i].Parent)
+		}
+	}
+
+	return nil
+}
+
+func createDownloadFile() (*os.File, error) {
+	return ioutil.TempFile("", "GetImageBlob")
+}
diff --git a/vendor/github.com/docker/docker/distribution/pull_v2_unix.go b/vendor/github.com/docker/docker/distribution/pull_v2_unix.go
new file mode 100644
index 00000000..9fbb875e
--- /dev/null
+++ b/vendor/github.com/docker/docker/distribution/pull_v2_unix.go
@@ -0,0 +1,12 @@
+// +build !windows
+
+package distribution
+
+import (
+	"github.com/docker/distribution/manifest/schema1"
+	"github.com/docker/docker/image"
+)
+
+func detectBaseLayer(is image.Store, m *schema1.Manifest, rootFS *image.RootFS) error {
+	return nil
+}
diff --git a/vendor/github.com/docker/docker/distribution/push.go b/vendor/github.com/docker/docker/distribution/push.go
new file mode 100644
index 00000000..52ee8e77
--- /dev/null
+++ b/vendor/github.com/docker/docker/distribution/push.go
@@ -0,0 +1,219 @@
+package distribution
+
+import (
+	"bufio"
+	"compress/gzip"
+	"fmt"
+	"io"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/distribution/metadata"
+	"github.com/docker/docker/distribution/xfer"
+	"github.com/docker/docker/image"
+	"github.com/docker/docker/layer"
+	"github.com/docker/docker/pkg/progress"
+	"github.com/docker/docker/reference"
+	"github.com/docker/docker/registry"
+	"github.com/docker/engine-api/types"
+	"github.com/docker/libtrust"
+	"golang.org/x/net/context"
+)
+
+// ImagePushConfig stores push configuration.
+type ImagePushConfig struct {
+	// MetaHeaders store HTTP headers with metadata about the image
+	MetaHeaders map[string][]string
+	// AuthConfig holds authentication credentials for authenticating with
+	// the registry.
+	AuthConfig *types.AuthConfig
+	// ProgressOutput is the interface for showing the status of the push
+	// operation.
+	ProgressOutput progress.Output
+	// RegistryService is the registry service to use for TLS configuration
+	// and endpoint lookup.
+	RegistryService *registry.Service
+	// ImageEventLogger notifies events for a given image
+	ImageEventLogger func(id, name, action string)
+	// MetadataStore is the storage backend for distribution-specific
+	// metadata.
+	MetadataStore metadata.Store
+	// LayerStore manages layers.
+	LayerStore layer.Store
+	// ImageStore manages images.
+	ImageStore image.Store
+	// ReferenceStore manages tags.
+	ReferenceStore reference.Store
+	// TrustKey is the private key for legacy signatures. This is typically
+	// an ephemeral key, since these signatures are no longer verified.
+	TrustKey libtrust.PrivateKey
+	// UploadManager dispatches uploads.
+	UploadManager *xfer.LayerUploadManager
+}
+
+// Pusher is an interface that abstracts pushing for different API versions.
+type Pusher interface {
+	// Push tries to push the image configured at the creation of Pusher.
+	// Push returns an error if any; a fallbackError signals that the push may be retried on the next configured endpoint.
+	//
+	// TODO(tiborvass): have Push() take a reference to repository + tag, so that the pusher itself is repository-agnostic.
+	Push(ctx context.Context) error
+}
+
+const compressionBufSize = 32768
+
+// NewPusher creates a new Pusher interface that will push to either a v1 or v2
+// registry. The endpoint argument contains a Version field that determines
+// whether a v1 or v2 pusher will be created. The other parameters are passed
+// through to the underlying pusher implementation for use during the actual
+// push operation.
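+//
+// Illustrative use (assuming endpoints obtained from LookupPushEndpoints, as
+// in Push below):
+//
+//	pusher, err := NewPusher(ref, endpoints[0], repoInfo, imagePushConfig)
+//	if err == nil {
+//		err = pusher.Push(ctx)
+//	}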
+func NewPusher(ref reference.Named, endpoint registry.APIEndpoint, repoInfo *registry.RepositoryInfo, imagePushConfig *ImagePushConfig) (Pusher, error) { + switch endpoint.Version { + case registry.APIVersion2: + return &v2Pusher{ + v2MetadataService: metadata.NewV2MetadataService(imagePushConfig.MetadataStore), + ref: ref, + endpoint: endpoint, + repoInfo: repoInfo, + config: imagePushConfig, + }, nil + case registry.APIVersion1: + return &v1Pusher{ + v1IDService: metadata.NewV1IDService(imagePushConfig.MetadataStore), + ref: ref, + endpoint: endpoint, + repoInfo: repoInfo, + config: imagePushConfig, + }, nil + } + return nil, fmt.Errorf("unknown version %d for registry %s", endpoint.Version, endpoint.URL) +} + +// Push initiates a push operation on the repository named localName. +// ref is the specific variant of the image to be pushed. +// If no tag is provided, all tags will be pushed. +func Push(ctx context.Context, ref reference.Named, imagePushConfig *ImagePushConfig) error { + // FIXME: Allow to interrupt current push when new push of same image is done. + + // Resolve the Repository name from fqn to RepositoryInfo + repoInfo, err := imagePushConfig.RegistryService.ResolveRepository(ref) + if err != nil { + return err + } + + endpoints, err := imagePushConfig.RegistryService.LookupPushEndpoints(repoInfo.Hostname()) + if err != nil { + return err + } + + progress.Messagef(imagePushConfig.ProgressOutput, "", "The push refers to a repository [%s]", repoInfo.FullName()) + + associations := imagePushConfig.ReferenceStore.ReferencesByName(repoInfo) + if len(associations) == 0 { + return fmt.Errorf("Repository does not exist: %s", repoInfo.Name()) + } + + var ( + lastErr error + + // confirmedV2 is set to true if a push attempt managed to + // confirm that it was talking to a v2 registry. This will + // prevent fallback to the v1 protocol. + confirmedV2 bool + + // confirmedTLSRegistries is a map indicating which registries + // are known to be using TLS. There should never be a plaintext + // retry for any of these. + confirmedTLSRegistries = make(map[string]struct{}) + ) + + for _, endpoint := range endpoints { + if confirmedV2 && endpoint.Version == registry.APIVersion1 { + logrus.Debugf("Skipping v1 endpoint %s because v2 registry was detected", endpoint.URL) + continue + } + + if endpoint.URL.Scheme != "https" { + if _, confirmedTLS := confirmedTLSRegistries[endpoint.URL.Host]; confirmedTLS { + logrus.Debugf("Skipping non-TLS endpoint %s for host/port that appears to use TLS", endpoint.URL) + continue + } + } + + logrus.Debugf("Trying to push %s to %s %s", repoInfo.FullName(), endpoint.URL, endpoint.Version) + + pusher, err := NewPusher(ref, endpoint, repoInfo, imagePushConfig) + if err != nil { + lastErr = err + continue + } + if err := pusher.Push(ctx); err != nil { + // Was this push cancelled? If so, don't try to fall + // back. 
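+			// The non-blocking select below distinguishes a cancelled
+			// context (return the error as-is, no fallback) from
+			// transport-level failures, where a fallbackError may move
+			// the push on to the next endpoint.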
+ select { + case <-ctx.Done(): + default: + if fallbackErr, ok := err.(fallbackError); ok { + confirmedV2 = confirmedV2 || fallbackErr.confirmedV2 + if fallbackErr.transportOK && endpoint.URL.Scheme == "https" { + confirmedTLSRegistries[endpoint.URL.Host] = struct{}{} + } + err = fallbackErr.err + lastErr = err + logrus.Errorf("Attempting next endpoint for push after error: %v", err) + continue + } + } + + logrus.Errorf("Not continuing with push after error: %v", err) + return err + } + + imagePushConfig.ImageEventLogger(ref.String(), repoInfo.Name(), "push") + return nil + } + + if lastErr == nil { + lastErr = fmt.Errorf("no endpoints found for %s", repoInfo.FullName()) + } + return lastErr +} + +// compress returns an io.ReadCloser which will supply a compressed version of +// the provided Reader. The caller must close the ReadCloser after reading the +// compressed data. +// +// Note that this function returns a reader instead of taking a writer as an +// argument so that it can be used with httpBlobWriter's ReadFrom method. +// Using httpBlobWriter's Write method would send a PATCH request for every +// Write call. +// +// The second return value is a channel that gets closed when the goroutine +// is finished. This allows the caller to make sure the goroutine finishes +// before it releases any resources connected with the reader that was +// passed in. +func compress(in io.Reader) (io.ReadCloser, chan struct{}) { + compressionDone := make(chan struct{}) + + pipeReader, pipeWriter := io.Pipe() + // Use a bufio.Writer to avoid excessive chunking in HTTP request. + bufWriter := bufio.NewWriterSize(pipeWriter, compressionBufSize) + compressor := gzip.NewWriter(bufWriter) + + go func() { + _, err := io.Copy(compressor, in) + if err == nil { + err = compressor.Close() + } + if err == nil { + err = bufWriter.Flush() + } + if err != nil { + pipeWriter.CloseWithError(err) + } else { + pipeWriter.Close() + } + close(compressionDone) + }() + + return pipeReader, compressionDone +} diff --git a/vendor/github.com/docker/docker/distribution/push_v1.go b/vendor/github.com/docker/docker/distribution/push_v1.go new file mode 100644 index 00000000..b6e4a130 --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/push_v1.go @@ -0,0 +1,454 @@ +package distribution + +import ( + "fmt" + "sync" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/image" + "github.com/docker/docker/image/v1" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "golang.org/x/net/context" +) + +type v1Pusher struct { + v1IDService *metadata.V1IDService + endpoint registry.APIEndpoint + ref reference.Named + repoInfo *registry.RepositoryInfo + config *ImagePushConfig + session *registry.Session +} + +func (p *v1Pusher) Push(ctx context.Context) error { + tlsConfig, err := p.config.RegistryService.TLSConfig(p.repoInfo.Index.Name) + if err != nil { + return err + } + // Adds Docker-specific headers as well as user-specified headers (metaHeaders) + tr := transport.NewTransport( + // TODO(tiborvass): was NoTimeout + registry.NewTransport(tlsConfig), + registry.DockerHeaders(dockerversion.DockerUserAgent(ctx), p.config.MetaHeaders)..., + 
)
+	client := registry.HTTPClient(tr)
+	v1Endpoint, err := p.endpoint.ToV1Endpoint(dockerversion.DockerUserAgent(ctx), p.config.MetaHeaders)
+	if err != nil {
+		logrus.Debugf("Could not get v1 endpoint: %v", err)
+		return fallbackError{err: err}
+	}
+	p.session, err = registry.NewSession(client, p.config.AuthConfig, v1Endpoint)
+	if err != nil {
+		// TODO(dmcgowan): Check if should fallback
+		return fallbackError{err: err}
+	}
+	if err := p.pushRepository(ctx); err != nil {
+		// TODO(dmcgowan): Check if should fallback
+		return err
+	}
+	return nil
+}
+
+// v1Image exposes the configuration, filesystem layer ID, and a v1 ID for an
+// image being pushed to a v1 registry.
+type v1Image interface {
+	Config() []byte
+	Layer() layer.Layer
+	V1ID() string
+}
+
+type v1ImageCommon struct {
+	layer  layer.Layer
+	config []byte
+	v1ID   string
+}
+
+func (common *v1ImageCommon) Config() []byte {
+	return common.config
+}
+
+func (common *v1ImageCommon) V1ID() string {
+	return common.v1ID
+}
+
+func (common *v1ImageCommon) Layer() layer.Layer {
+	return common.layer
+}
+
+// v1TopImage defines a runnable (top layer) image being pushed to a v1
+// registry.
+type v1TopImage struct {
+	v1ImageCommon
+	imageID image.ID
+}
+
+func newV1TopImage(imageID image.ID, img *image.Image, l layer.Layer, parent *v1DependencyImage) (*v1TopImage, error) {
+	v1ID := digest.Digest(imageID).Hex()
+	parentV1ID := ""
+	if parent != nil {
+		parentV1ID = parent.V1ID()
+	}
+
+	config, err := v1.MakeV1ConfigFromConfig(img, v1ID, parentV1ID, false)
+	if err != nil {
+		return nil, err
+	}
+
+	return &v1TopImage{
+		v1ImageCommon: v1ImageCommon{
+			v1ID:   v1ID,
+			config: config,
+			layer:  l,
+		},
+		imageID: imageID,
+	}, nil
+}
+
+// v1DependencyImage defines a dependency layer being pushed to a v1 registry.
+type v1DependencyImage struct {
+	v1ImageCommon
+}
+
+func newV1DependencyImage(l layer.Layer, parent *v1DependencyImage) (*v1DependencyImage, error) {
+	v1ID := digest.Digest(l.ChainID()).Hex()
+
+	config := ""
+	if parent != nil {
+		config = fmt.Sprintf(`{"id":"%s","parent":"%s"}`, v1ID, parent.V1ID())
+	} else {
+		config = fmt.Sprintf(`{"id":"%s"}`, v1ID)
+	}
+	return &v1DependencyImage{
+		v1ImageCommon: v1ImageCommon{
+			v1ID:   v1ID,
+			config: []byte(config),
+			layer:  l,
+		},
+	}, nil
+}
+
+// Retrieve all the images to be uploaded in the correct order
+func (p *v1Pusher) getImageList() (imageList []v1Image, tagsByImage map[image.ID][]string, referencedLayers []layer.Layer, err error) {
+	tagsByImage = make(map[image.ID][]string)
+
+	// Ignore digest references
+	if _, isCanonical := p.ref.(reference.Canonical); isCanonical {
+		return
+	}
+
+	tagged, isTagged := p.ref.(reference.NamedTagged)
+	if isTagged {
+		// Push a specific tag
+		var imgID image.ID
+		imgID, err = p.config.ReferenceStore.Get(p.ref)
+		if err != nil {
+			return
+		}
+
+		imageList, err = p.imageListForTag(imgID, nil, &referencedLayers)
+		if err != nil {
+			return
+		}
+
+		tagsByImage[imgID] = []string{tagged.Tag()}
+
+		return
+	}
+
+	imagesSeen := make(map[image.ID]struct{})
+	dependenciesSeen := make(map[layer.ChainID]*v1DependencyImage)
+
+	associations := p.config.ReferenceStore.ReferencesByName(p.ref)
+	for _, association := range associations {
+		if tagged, isTagged = association.Ref.(reference.NamedTagged); !isTagged {
+			// Ignore digest references.
+			continue
+		}
+
+		tagsByImage[association.ImageID] = append(tagsByImage[association.ImageID], tagged.Tag())
+
+		if _, present := imagesSeen[association.ImageID]; present {
+			// Skip generating image list for already-seen image
+			continue
+		}
+		imagesSeen[association.ImageID] = struct{}{}
+
+		imageListForThisTag, err := p.imageListForTag(association.ImageID, dependenciesSeen, &referencedLayers)
+		if err != nil {
+			return nil, nil, nil, err
+		}
+
+		// append to main image list
+		imageList = append(imageList, imageListForThisTag...)
+	}
+	if len(imageList) == 0 {
+		return nil, nil, nil, fmt.Errorf("No images found for the requested repository / tag")
+	}
+	logrus.Debugf("Image list: %v", imageList)
+	logrus.Debugf("Tags by image: %v", tagsByImage)
+
+	return
}
+
+func (p *v1Pusher) imageListForTag(imgID image.ID, dependenciesSeen map[layer.ChainID]*v1DependencyImage, referencedLayers *[]layer.Layer) (imageListForThisTag []v1Image, err error) {
+	img, err := p.config.ImageStore.Get(imgID)
+	if err != nil {
+		return nil, err
+	}
+
+	topLayerID := img.RootFS.ChainID()
+
+	var l layer.Layer
+	if topLayerID == "" {
+		l = layer.EmptyLayer
+	} else {
+		l, err = p.config.LayerStore.Get(topLayerID)
+		if err != nil {
+			return nil, fmt.Errorf("failed to get top layer from image: %v", err)
+		}
+		*referencedLayers = append(*referencedLayers, l)
+	}
+
+	dependencyImages, parent, err := generateDependencyImages(l.Parent(), dependenciesSeen)
+	if err != nil {
+		return nil, err
+	}
+
+	topImage, err := newV1TopImage(imgID, img, l, parent)
+	if err != nil {
+		return nil, err
+	}
+
+	imageListForThisTag = append(dependencyImages, topImage)
+
+	return
+}
+
+func generateDependencyImages(l layer.Layer, dependenciesSeen map[layer.ChainID]*v1DependencyImage) (imageListForThisTag []v1Image, parent *v1DependencyImage, err error) {
+	if l == nil {
+		return nil, nil, nil
+	}
+
+	imageListForThisTag, parent, err = generateDependencyImages(l.Parent(), dependenciesSeen)
+
+	if dependenciesSeen != nil {
+		if dependencyImage, present := dependenciesSeen[l.ChainID()]; present {
+			// This layer is already on the list, we can ignore it
+			// and all its parents.
+			return imageListForThisTag, dependencyImage, nil
+		}
+	}
+
+	dependencyImage, err := newV1DependencyImage(l, parent)
+	if err != nil {
+		return nil, nil, err
+	}
+	imageListForThisTag = append(imageListForThisTag, dependencyImage)
+
+	if dependenciesSeen != nil {
+		dependenciesSeen[l.ChainID()] = dependencyImage
+	}
+
+	return imageListForThisTag, dependencyImage, nil
+}
+
+// createImageIndex returns an index of an image's layer IDs and tags.
+func createImageIndex(images []v1Image, tags map[image.ID][]string) []*registry.ImgData {
+	var imageIndex []*registry.ImgData
+	for _, img := range images {
+		v1ID := img.V1ID()
+
+		if topImage, isTopImage := img.(*v1TopImage); isTopImage {
+			if tags, hasTags := tags[topImage.imageID]; hasTags {
+				// If an image has tags you must add an entry in the image index
+				// for each tag
+				for _, tag := range tags {
+					imageIndex = append(imageIndex, &registry.ImgData{
+						ID:  v1ID,
+						Tag: tag,
+					})
+				}
+				continue
+			}
+		}
+
+		// If the image does not have a tag it still needs to be sent to the
+		// registry with an empty tag so that it is associated with the repository
+		imageIndex = append(imageIndex, &registry.ImgData{
+			ID:  v1ID,
+			Tag: "",
+		})
+	}
+	return imageIndex
+}
+
+// lookupImageOnEndpoint checks the specified endpoint to see if an image exists
+// and if it is absent then it sends the image id to the channel to be pushed.
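+// Several workers run this loop concurrently (see pushImageToEndpoint below),
+// so multiple existence checks can be in flight against the registry at once.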
+func (p *v1Pusher) lookupImageOnEndpoint(wg *sync.WaitGroup, endpoint string, images chan v1Image, imagesToPush chan string) {
+	defer wg.Done()
+	for img := range images {
+		v1ID := img.V1ID()
+		truncID := stringid.TruncateID(img.Layer().DiffID().String())
+		if err := p.session.LookupRemoteImage(v1ID, endpoint); err != nil {
+			logrus.Errorf("Error in LookupRemoteImage: %s", err)
+			imagesToPush <- v1ID
+			progress.Update(p.config.ProgressOutput, truncID, "Waiting")
+		} else {
+			progress.Update(p.config.ProgressOutput, truncID, "Already exists")
+		}
+	}
+}
+
+func (p *v1Pusher) pushImageToEndpoint(ctx context.Context, endpoint string, imageList []v1Image, tags map[image.ID][]string, repo *registry.RepositoryData) error {
+	workerCount := len(imageList)
+	// start a maximum of 5 workers to check if images exist on the specified endpoint.
+	if workerCount > 5 {
+		workerCount = 5
+	}
+	var (
+		wg           = &sync.WaitGroup{}
+		imageData    = make(chan v1Image, workerCount*2)
+		imagesToPush = make(chan string, workerCount*2)
+		pushes       = make(chan map[string]struct{}, 1)
+	)
+	for i := 0; i < workerCount; i++ {
+		wg.Add(1)
+		go p.lookupImageOnEndpoint(wg, endpoint, imageData, imagesToPush)
+	}
+	// start a goroutine that consumes the images to push
+	go func() {
+		shouldPush := make(map[string]struct{})
+		for id := range imagesToPush {
+			shouldPush[id] = struct{}{}
+		}
+		pushes <- shouldPush
+	}()
+	for _, img := range imageList {
+		imageData <- img
+	}
+	// close the channel to notify the workers that there will be no more images to check.
+	close(imageData)
+	wg.Wait()
+	close(imagesToPush)
+	// wait for all the images that require pushes to be collected into a consumable map.
+	shouldPush := <-pushes
+	// finish by pushing any images and tags to the endpoint. The order in which
+	// the images are pushed is very important; that is why we are still iterating
+	// over the ordered list of imageIDs.
+	for _, img := range imageList {
+		v1ID := img.V1ID()
+		if _, push := shouldPush[v1ID]; push {
+			if _, err := p.pushImage(ctx, img, endpoint); err != nil {
+				// FIXME: Continue on error?
+				return err
+			}
+		}
+		if topImage, isTopImage := img.(*v1TopImage); isTopImage {
+			for _, tag := range tags[topImage.imageID] {
+				progress.Messagef(p.config.ProgressOutput, "", "Pushing tag for rev [%s] on {%s}", stringid.TruncateID(v1ID), endpoint+"repositories/"+p.repoInfo.RemoteName()+"/tags/"+tag)
+				if err := p.session.PushRegistryTag(p.repoInfo, v1ID, tag, endpoint); err != nil {
+					return err
+				}
+			}
+		}
+	}
+	return nil
+}
+
+// pushRepository pushes layers that do not already exist on the registry.
+func (p *v1Pusher) pushRepository(ctx context.Context) error {
+	imgList, tags, referencedLayers, err := p.getImageList()
+	defer func() {
+		for _, l := range referencedLayers {
+			p.config.LayerStore.Release(l)
+		}
+	}()
+	if err != nil {
+		return err
+	}
+
+	imageIndex := createImageIndex(imgList, tags)
+	for _, data := range imageIndex {
+		logrus.Debugf("Pushing ID: %s with Tag: %s", data.ID, data.Tag)
+	}
+
+	// Register all the images in a repository with the registry
+	// If an image is not in this list it will not be associated with the repository
+	repoData, err := p.session.PushImageJSONIndex(p.repoInfo, imageIndex, false, nil)
+	if err != nil {
+		return err
+	}
+	// push the repository to each of the endpoints only if it does not exist.
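+	// Protocol note (v1): the PushImageJSONIndex call above (validate=false)
+	// announces the image index; the second call after the per-endpoint
+	// pushes below confirms completion with validate=true.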
+ for _, endpoint := range repoData.Endpoints { + if err := p.pushImageToEndpoint(ctx, endpoint, imgList, tags, repoData); err != nil { + return err + } + } + _, err = p.session.PushImageJSONIndex(p.repoInfo, imageIndex, true, repoData.Endpoints) + return err +} + +func (p *v1Pusher) pushImage(ctx context.Context, v1Image v1Image, ep string) (checksum string, err error) { + l := v1Image.Layer() + v1ID := v1Image.V1ID() + truncID := stringid.TruncateID(l.DiffID().String()) + + jsonRaw := v1Image.Config() + progress.Update(p.config.ProgressOutput, truncID, "Pushing") + + // General rule is to use ID for graph accesses and compatibilityID for + // calls to session.registry() + imgData := ®istry.ImgData{ + ID: v1ID, + } + + // Send the json + if err := p.session.PushImageJSONRegistry(imgData, jsonRaw, ep); err != nil { + if err == registry.ErrAlreadyExists { + progress.Update(p.config.ProgressOutput, truncID, "Image already pushed, skipping") + return "", nil + } + return "", err + } + + arch, err := l.TarStream() + if err != nil { + return "", err + } + defer arch.Close() + + // don't care if this fails; best effort + size, _ := l.DiffSize() + + // Send the layer + logrus.Debugf("rendered layer for %s of [%d] size", v1ID, size) + + reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, arch), p.config.ProgressOutput, size, truncID, "Pushing") + defer reader.Close() + + checksum, checksumPayload, err := p.session.PushImageLayerRegistry(v1ID, reader, ep, jsonRaw) + if err != nil { + return "", err + } + imgData.Checksum = checksum + imgData.ChecksumPayload = checksumPayload + // Send the checksum + if err := p.session.PushImageChecksumRegistry(imgData, ep); err != nil { + return "", err + } + + if err := p.v1IDService.Set(v1ID, p.repoInfo.Index.Name, l.DiffID()); err != nil { + logrus.Warnf("Could not set v1 ID mapping: %v", err) + } + + progress.Update(p.config.ProgressOutput, truncID, "Image successfully pushed") + return imgData.Checksum, nil +} diff --git a/vendor/github.com/docker/docker/distribution/push_v2.go b/vendor/github.com/docker/docker/distribution/push_v2.go new file mode 100644 index 00000000..e86badb4 --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/push_v2.go @@ -0,0 +1,438 @@ +package distribution + +import ( + "errors" + "fmt" + "io" + "sync" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/manifest/schema2" + distreference "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/client" + "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/distribution/xfer" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "golang.org/x/net/context" +) + +// PushResult contains the tag, manifest digest, and manifest size from the +// push. It's used to signal this information to the trust code in the client +// so it can sign the manifest if necessary. 
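+// It is emitted as auxiliary progress output (see pushV2Tag), e.g.:
+//
+//	progress.Aux(out, PushResult{Tag: "latest", Digest: dgst, Size: n})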
+type PushResult struct {
+	Tag    string
+	Digest digest.Digest
+	Size   int
+}
+
+type v2Pusher struct {
+	v2MetadataService *metadata.V2MetadataService
+	ref               reference.Named
+	endpoint          registry.APIEndpoint
+	repoInfo          *registry.RepositoryInfo
+	config            *ImagePushConfig
+	repo              distribution.Repository
+
+	// pushState is state built by the Upload functions.
+	pushState pushState
+}
+
+type pushState struct {
+	sync.Mutex
+	// remoteLayers is the set of layers known to exist on the remote side.
+	// This avoids redundant queries when pushing multiple tags that
+	// involve the same layers. It is also used to fill in digest and size
+	// information when building the manifest.
+	remoteLayers map[layer.DiffID]distribution.Descriptor
+	// confirmedV2 is set to true if we confirm we're talking to a v2
+	// registry. This is used to limit fallbacks to the v1 protocol.
+	confirmedV2 bool
+}
+
+func (p *v2Pusher) Push(ctx context.Context) (err error) {
+	p.pushState.remoteLayers = make(map[layer.DiffID]distribution.Descriptor)
+
+	p.repo, p.pushState.confirmedV2, err = NewV2Repository(ctx, p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "push", "pull")
+	if err != nil {
+		logrus.Debugf("Error getting v2 registry: %v", err)
+		return err
+	}
+
+	if err = p.pushV2Repository(ctx); err != nil {
+		if continueOnError(err) {
+			return fallbackError{
+				err:         err,
+				confirmedV2: p.pushState.confirmedV2,
+				transportOK: true,
+			}
+		}
+	}
+	return err
+}
+
+func (p *v2Pusher) pushV2Repository(ctx context.Context) (err error) {
+	if namedTagged, isNamedTagged := p.ref.(reference.NamedTagged); isNamedTagged {
+		imageID, err := p.config.ReferenceStore.Get(p.ref)
+		if err != nil {
+			return fmt.Errorf("tag does not exist: %s", p.ref.String())
+		}
+
+		return p.pushV2Tag(ctx, namedTagged, imageID)
+	}
+
+	if !reference.IsNameOnly(p.ref) {
+		return errors.New("cannot push a digest reference")
+	}
+
+	// Push all tags
+	pushed := 0
+	for _, association := range p.config.ReferenceStore.ReferencesByName(p.ref) {
+		if namedTagged, isNamedTagged := association.Ref.(reference.NamedTagged); isNamedTagged {
+			pushed++
+			if err := p.pushV2Tag(ctx, namedTagged, association.ImageID); err != nil {
+				return err
+			}
+		}
+	}
+
+	if pushed == 0 {
+		return fmt.Errorf("no tags to push for %s", p.repoInfo.Name())
+	}
+
+	return nil
+}
+
+func (p *v2Pusher) pushV2Tag(ctx context.Context, ref reference.NamedTagged, imageID image.ID) error {
+	logrus.Debugf("Pushing repository: %s", ref.String())
+
+	img, err := p.config.ImageStore.Get(imageID)
+	if err != nil {
+		return fmt.Errorf("could not find image from tag %s: %v", ref.String(), err)
+	}
+
+	var l layer.Layer
+
+	topLayerID := img.RootFS.ChainID()
+	if topLayerID == "" {
+		l = layer.EmptyLayer
+	} else {
+		l, err = p.config.LayerStore.Get(topLayerID)
+		if err != nil {
+			return fmt.Errorf("failed to get top layer from image: %v", err)
+		}
+		defer layer.ReleaseAndLog(p.config.LayerStore, l)
+	}
+
+	var descriptors []xfer.UploadDescriptor
+
+	descriptorTemplate := v2PushDescriptor{
+		v2MetadataService: p.v2MetadataService,
+		repoInfo:          p.repoInfo,
+		repo:              p.repo,
+		pushState:         &p.pushState,
+	}
+
+	// Loop bounds condition is to avoid pushing the base layer on Windows.
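+	// Illustrative: with DiffIDs [base, mid, top], the walk below visits
+	// exactly three layers starting from the top; a Windows foreign base
+	// layer sitting below them is never reached, so it is never pushed.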
+ for i := 0; i < len(img.RootFS.DiffIDs); i++ { + descriptor := descriptorTemplate + descriptor.layer = l + descriptors = append(descriptors, &descriptor) + + l = l.Parent() + } + + if err := p.config.UploadManager.Upload(ctx, descriptors, p.config.ProgressOutput); err != nil { + return err + } + + // Try schema2 first + builder := schema2.NewManifestBuilder(p.repo.Blobs(ctx), img.RawJSON()) + manifest, err := manifestFromBuilder(ctx, builder, descriptors) + if err != nil { + return err + } + + manSvc, err := p.repo.Manifests(ctx) + if err != nil { + return err + } + + putOptions := []distribution.ManifestServiceOption{distribution.WithTag(ref.Tag())} + if _, err = manSvc.Put(ctx, manifest, putOptions...); err != nil { + logrus.Warnf("failed to upload schema2 manifest: %v - falling back to schema1", err) + + manifestRef, err := distreference.WithTag(p.repo.Named(), ref.Tag()) + if err != nil { + return err + } + builder = schema1.NewConfigManifestBuilder(p.repo.Blobs(ctx), p.config.TrustKey, manifestRef, img.RawJSON()) + manifest, err = manifestFromBuilder(ctx, builder, descriptors) + if err != nil { + return err + } + + if _, err = manSvc.Put(ctx, manifest, putOptions...); err != nil { + return err + } + } + + var canonicalManifest []byte + + switch v := manifest.(type) { + case *schema1.SignedManifest: + canonicalManifest = v.Canonical + case *schema2.DeserializedManifest: + _, canonicalManifest, err = v.Payload() + if err != nil { + return err + } + } + + manifestDigest := digest.FromBytes(canonicalManifest) + progress.Messagef(p.config.ProgressOutput, "", "%s: digest: %s size: %d", ref.Tag(), manifestDigest, len(canonicalManifest)) + // Signal digest to the trust client so it can sign the + // push, if appropriate. + progress.Aux(p.config.ProgressOutput, PushResult{Tag: ref.Tag(), Digest: manifestDigest, Size: len(canonicalManifest)}) + + return nil +} + +func manifestFromBuilder(ctx context.Context, builder distribution.ManifestBuilder, descriptors []xfer.UploadDescriptor) (distribution.Manifest, error) { + // descriptors is in reverse order; iterate backwards to get references + // appended in the right order. + for i := len(descriptors) - 1; i >= 0; i-- { + if err := builder.AppendReference(descriptors[i].(*v2PushDescriptor)); err != nil { + return nil, err + } + } + + return builder.Build(ctx) +} + +type v2PushDescriptor struct { + layer layer.Layer + v2MetadataService *metadata.V2MetadataService + repoInfo reference.Named + repo distribution.Repository + pushState *pushState + remoteDescriptor distribution.Descriptor +} + +func (pd *v2PushDescriptor) Key() string { + return "v2push:" + pd.repo.Named().Name() + " " + pd.layer.DiffID().String() +} + +func (pd *v2PushDescriptor) ID() string { + return stringid.TruncateID(pd.layer.DiffID().String()) +} + +func (pd *v2PushDescriptor) DiffID() layer.DiffID { + return pd.layer.DiffID() +} + +func (pd *v2PushDescriptor) Upload(ctx context.Context, progressOutput progress.Output) (distribution.Descriptor, error) { + diffID := pd.DiffID() + + pd.pushState.Lock() + if descriptor, ok := pd.pushState.remoteLayers[diffID]; ok { + // it is already known that the push is not needed and + // therefore doing a stat is unnecessary + pd.pushState.Unlock() + progress.Update(progressOutput, pd.ID(), "Layer already exists") + return descriptor, nil + } + pd.pushState.Unlock() + + // Do we have any metadata associated with this layer's DiffID? 
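+	// Illustrative: one DiffID may map to several V2Metadata entries, one
+	// per repository the blob was previously pushed to or pulled from; any
+	// of them can let us skip this upload via a stat or cross-repo mount.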
+ v2Metadata, err := pd.v2MetadataService.GetMetadata(diffID) + if err == nil { + descriptor, exists, err := layerAlreadyExists(ctx, v2Metadata, pd.repoInfo, pd.repo, pd.pushState) + if err != nil { + progress.Update(progressOutput, pd.ID(), "Image push failed") + return distribution.Descriptor{}, retryOnError(err) + } + if exists { + progress.Update(progressOutput, pd.ID(), "Layer already exists") + pd.pushState.Lock() + pd.pushState.remoteLayers[diffID] = descriptor + pd.pushState.Unlock() + return descriptor, nil + } + } + + logrus.Debugf("Pushing layer: %s", diffID) + + // if digest was empty or not saved, or if blob does not exist on the remote repository, + // then push the blob. + bs := pd.repo.Blobs(ctx) + + var layerUpload distribution.BlobWriter + mountAttemptsRemaining := 3 + + // Attempt to find another repository in the same registry to mount the layer + // from to avoid an unnecessary upload. + // Note: metadata is stored from oldest to newest, so we iterate through this + // slice in reverse to maximize our chances of the blob still existing in the + // remote repository. + for i := len(v2Metadata) - 1; i >= 0 && mountAttemptsRemaining > 0; i-- { + mountFrom := v2Metadata[i] + + sourceRepo, err := reference.ParseNamed(mountFrom.SourceRepository) + if err != nil { + continue + } + if pd.repoInfo.Hostname() != sourceRepo.Hostname() { + // don't mount blobs from another registry + continue + } + + namedRef, err := reference.WithName(mountFrom.SourceRepository) + if err != nil { + continue + } + + // TODO (brianbland): We need to construct a reference where the Name is + // only the full remote name, so clean this up when distribution has a + // richer reference package + remoteRef, err := distreference.WithName(namedRef.RemoteName()) + if err != nil { + continue + } + + canonicalRef, err := distreference.WithDigest(remoteRef, mountFrom.Digest) + if err != nil { + continue + } + + logrus.Debugf("attempting to mount layer %s (%s) from %s", diffID, mountFrom.Digest, sourceRepo.FullName()) + + layerUpload, err = bs.Create(ctx, client.WithMountFrom(canonicalRef)) + switch err := err.(type) { + case distribution.ErrBlobMounted: + progress.Updatef(progressOutput, pd.ID(), "Mounted from %s", err.From.Name()) + + err.Descriptor.MediaType = schema2.MediaTypeLayer + + pd.pushState.Lock() + pd.pushState.confirmedV2 = true + pd.pushState.remoteLayers[diffID] = err.Descriptor + pd.pushState.Unlock() + + // Cache mapping from this layer's DiffID to the blobsum + if err := pd.v2MetadataService.Add(diffID, metadata.V2Metadata{Digest: mountFrom.Digest, SourceRepository: pd.repoInfo.FullName()}); err != nil { + return distribution.Descriptor{}, xfer.DoNotRetry{Err: err} + } + return err.Descriptor, nil + case nil: + // blob upload session created successfully, so begin the upload + mountAttemptsRemaining = 0 + default: + // unable to mount layer from this repository, so this source mapping is no longer valid + logrus.Debugf("unassociating layer %s (%s) with %s", diffID, mountFrom.Digest, mountFrom.SourceRepository) + pd.v2MetadataService.Remove(mountFrom) + mountAttemptsRemaining-- + } + } + + if layerUpload == nil { + layerUpload, err = bs.Create(ctx) + if err != nil { + return distribution.Descriptor{}, retryOnError(err) + } + } + defer layerUpload.Close() + + arch, err := pd.layer.TarStream() + if err != nil { + return distribution.Descriptor{}, xfer.DoNotRetry{Err: err} + } + + // don't care if this fails; best effort + size, _ := pd.layer.DiffSize() + + reader := 
progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, arch), progressOutput, size, pd.ID(), "Pushing")
+	compressedReader, compressionDone := compress(reader)
+	defer func() {
+		reader.Close()
+		<-compressionDone
+	}()
+
+	digester := digest.Canonical.New()
+	tee := io.TeeReader(compressedReader, digester.Hash())
+
+	nn, err := layerUpload.ReadFrom(tee)
+	compressedReader.Close()
+	if err != nil {
+		return distribution.Descriptor{}, retryOnError(err)
+	}
+
+	pushDigest := digester.Digest()
+	if _, err := layerUpload.Commit(ctx, distribution.Descriptor{Digest: pushDigest}); err != nil {
+		return distribution.Descriptor{}, retryOnError(err)
+	}
+
+	logrus.Debugf("uploaded layer %s (%s), %d bytes", diffID, pushDigest, nn)
+	progress.Update(progressOutput, pd.ID(), "Pushed")
+
+	// Cache mapping from this layer's DiffID to the blobsum
+	if err := pd.v2MetadataService.Add(diffID, metadata.V2Metadata{Digest: pushDigest, SourceRepository: pd.repoInfo.FullName()}); err != nil {
+		return distribution.Descriptor{}, xfer.DoNotRetry{Err: err}
+	}
+
+	pd.pushState.Lock()
+
+	// If Commit succeeded, that's an indication that the remote registry
+	// speaks the v2 protocol.
+	pd.pushState.confirmedV2 = true
+
+	descriptor := distribution.Descriptor{
+		Digest:    pushDigest,
+		MediaType: schema2.MediaTypeLayer,
+		Size:      nn,
+	}
+	pd.pushState.remoteLayers[diffID] = descriptor
+
+	pd.pushState.Unlock()
+
+	return descriptor, nil
+}
+
+func (pd *v2PushDescriptor) SetRemoteDescriptor(descriptor distribution.Descriptor) {
+	pd.remoteDescriptor = descriptor
+}
+
+func (pd *v2PushDescriptor) Descriptor() distribution.Descriptor {
+	return pd.remoteDescriptor
+}
+
+// layerAlreadyExists checks if the registry already knows about any of the
+// metadata passed in the "metadata" slice. If it finds one that the registry
+// knows about, it returns the known digest and "true".
+func layerAlreadyExists(ctx context.Context, metadata []metadata.V2Metadata, repoInfo reference.Named, repo distribution.Repository, pushState *pushState) (distribution.Descriptor, bool, error) { + for _, meta := range metadata { + // Only check blobsums that are known to this repository or have an unknown source + if meta.SourceRepository != "" && meta.SourceRepository != repoInfo.FullName() { + continue + } + descriptor, err := repo.Blobs(ctx).Stat(ctx, meta.Digest) + switch err { + case nil: + descriptor.MediaType = schema2.MediaTypeLayer + return descriptor, true, nil + case distribution.ErrBlobUnknown: + // nop + default: + return distribution.Descriptor{}, false, err + } + } + return distribution.Descriptor{}, false, nil +} diff --git a/vendor/github.com/docker/docker/distribution/registry.go b/vendor/github.com/docker/docker/distribution/registry.go new file mode 100644 index 00000000..f768a14d --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/registry.go @@ -0,0 +1,130 @@ +package distribution + +import ( + "fmt" + "net" + "net/http" + "net/url" + "time" + + "github.com/docker/distribution" + distreference "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/client" + "github.com/docker/distribution/registry/client/auth" + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/registry" + "github.com/docker/engine-api/types" + "golang.org/x/net/context" +) + +type dumbCredentialStore struct { + auth *types.AuthConfig +} + +func (dcs dumbCredentialStore) Basic(*url.URL) (string, string) { + return dcs.auth.Username, dcs.auth.Password +} + +func (dcs dumbCredentialStore) RefreshToken(*url.URL, string) string { + return dcs.auth.IdentityToken +} + +func (dcs dumbCredentialStore) SetRefreshToken(*url.URL, string, string) { +} + +// NewV2Repository returns a repository (v2 only). It creates a HTTP transport +// providing timeout settings and authentication support, and also verifies the +// remote API version. +func NewV2Repository(ctx context.Context, repoInfo *registry.RepositoryInfo, endpoint registry.APIEndpoint, metaHeaders http.Header, authConfig *types.AuthConfig, actions ...string) (repo distribution.Repository, foundVersion bool, err error) { + repoName := repoInfo.FullName() + // If endpoint does not support CanonicalName, use the RemoteName instead + if endpoint.TrimHostname { + repoName = repoInfo.RemoteName() + } + + // TODO(dmcgowan): Call close idle connections when complete, use keep alive + base := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + }).Dial, + TLSHandshakeTimeout: 10 * time.Second, + TLSClientConfig: endpoint.TLSConfig, + // TODO(dmcgowan): Call close idle connections when complete and use keep alive + DisableKeepAlives: true, + } + + modifiers := registry.DockerHeaders(dockerversion.DockerUserAgent(ctx), metaHeaders) + authTransport := transport.NewTransport(base, modifiers...) 
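+
+	// The ping below both checks that the endpoint speaks the v2 API and
+	// collects its WWW-Authenticate challenges; the authorizers appended
+	// to modifiers further down answer those challenges (token auth if
+	// configured, falling back to basic auth).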
+ + challengeManager, foundVersion, err := registry.PingV2Registry(endpoint, authTransport) + if err != nil { + transportOK := false + if responseErr, ok := err.(registry.PingResponseError); ok { + transportOK = true + err = responseErr.Err + } + return nil, foundVersion, fallbackError{ + err: err, + confirmedV2: foundVersion, + transportOK: transportOK, + } + } + + if authConfig.RegistryToken != "" { + passThruTokenHandler := &existingTokenHandler{token: authConfig.RegistryToken} + modifiers = append(modifiers, auth.NewAuthorizer(challengeManager, passThruTokenHandler)) + } else { + creds := dumbCredentialStore{auth: authConfig} + tokenHandlerOptions := auth.TokenHandlerOptions{ + Transport: authTransport, + Credentials: creds, + Scopes: []auth.Scope{ + auth.RepositoryScope{ + Repository: repoName, + Actions: actions, + }, + }, + ClientID: registry.AuthClientID, + } + tokenHandler := auth.NewTokenHandlerWithOptions(tokenHandlerOptions) + basicHandler := auth.NewBasicHandler(creds) + modifiers = append(modifiers, auth.NewAuthorizer(challengeManager, tokenHandler, basicHandler)) + } + tr := transport.NewTransport(base, modifiers...) + + repoNameRef, err := distreference.ParseNamed(repoName) + if err != nil { + return nil, foundVersion, fallbackError{ + err: err, + confirmedV2: foundVersion, + transportOK: true, + } + } + + repo, err = client.NewRepository(ctx, repoNameRef, endpoint.URL.String(), tr) + if err != nil { + err = fallbackError{ + err: err, + confirmedV2: foundVersion, + transportOK: true, + } + } + return +} + +type existingTokenHandler struct { + token string +} + +func (th *existingTokenHandler) Scheme() string { + return "bearer" +} + +func (th *existingTokenHandler) AuthorizeRequest(req *http.Request, params map[string]string) error { + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", th.token)) + return nil +} diff --git a/vendor/github.com/docker/docker/distribution/xfer/download.go b/vendor/github.com/docker/docker/distribution/xfer/download.go new file mode 100644 index 00000000..739c427c --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/xfer/download.go @@ -0,0 +1,430 @@ +package xfer + +import ( + "errors" + "fmt" + "io" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/progress" + "golang.org/x/net/context" +) + +const maxDownloadAttempts = 5 + +// LayerDownloadManager figures out which layers need to be downloaded, then +// registers and downloads those, taking into account dependencies between +// layers. +type LayerDownloadManager struct { + layerStore layer.Store + tm TransferManager +} + +// NewLayerDownloadManager returns a new LayerDownloadManager. +func NewLayerDownloadManager(layerStore layer.Store, concurrencyLimit int) *LayerDownloadManager { + return &LayerDownloadManager{ + layerStore: layerStore, + tm: NewTransferManager(concurrencyLimit), + } +} + +type downloadTransfer struct { + Transfer + + layerStore layer.Store + layer layer.Layer + err error +} + +// result returns the layer resulting from the download, if the download +// and registration were successful. +func (d *downloadTransfer) result() (layer.Layer, error) { + return d.layer, d.err +} + +// A DownloadDescriptor references a layer that may need to be downloaded. +type DownloadDescriptor interface { + // Key returns the key used to deduplicate downloads. 
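+	// (For the v2 puller this is "v2:" plus the layer digest, so the same
+	// blob referenced by two images is downloaded only once.)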
+ Key() string
+ // ID returns the ID for display purposes.
+ ID() string
+ // DiffID should return the DiffID for this layer, or an error
+ // if it is unknown (for example, if it has not been downloaded
+ // before).
+ DiffID() (layer.DiffID, error)
+ // Download is called to perform the download.
+ Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error)
+ // Close is called when the download manager is finished with this
+ // descriptor and will not call Download again or read from the reader
+ // that Download returned.
+ Close()
+}
+
+// DownloadDescriptorWithRegistered is a DownloadDescriptor that has an
+// additional Registered method which gets called after a downloaded layer is
+// registered. This allows the user of the download manager to know the DiffID
+// of each registered layer. This method is called if a cast to
+// DownloadDescriptorWithRegistered is successful.
+type DownloadDescriptorWithRegistered interface {
+ DownloadDescriptor
+ Registered(diffID layer.DiffID)
+}
+
+// Download is a blocking function which ensures the requested layers are
+// present in the layer store. It uses the string returned by the Key method to
+// deduplicate downloads. If a given layer is not already known to be present in
+// the layer store, and the key is not used by an in-progress download, the
+// Download method is called to get the layer tar data. Layers are then
+// registered in the appropriate order. The caller must call the returned
+// release function once it is done with the returned RootFS object.
+func (ldm *LayerDownloadManager) Download(ctx context.Context, initialRootFS image.RootFS, layers []DownloadDescriptor, progressOutput progress.Output) (image.RootFS, func(), error) {
+ var (
+ topLayer layer.Layer
+ topDownload *downloadTransfer
+ watcher *Watcher
+ missingLayer bool
+ transferKey = ""
+ downloadsByKey = make(map[string]*downloadTransfer)
+ )
+
+ rootFS := initialRootFS
+ for _, descriptor := range layers {
+ key := descriptor.Key()
+ transferKey += key
+
+ if !missingLayer {
+ missingLayer = true
+ diffID, err := descriptor.DiffID()
+ if err == nil {
+ getRootFS := rootFS
+ getRootFS.Append(diffID)
+ l, err := ldm.layerStore.Get(getRootFS.ChainID())
+ if err == nil {
+ // Layer already exists.
+ logrus.Debugf("Layer already exists: %s", descriptor.ID())
+ progress.Update(progressOutput, descriptor.ID(), "Already exists")
+ if topLayer != nil {
+ layer.ReleaseAndLog(ldm.layerStore, topLayer)
+ }
+ topLayer = l
+ missingLayer = false
+ rootFS.Append(diffID)
+ continue
+ }
+ }
+ }
+
+ // Does this layer have the same data as a previous layer in
+ // the stack? If so, avoid downloading it more than once.
+ var topDownloadUncasted Transfer
+ if existingDownload, ok := downloadsByKey[key]; ok {
+ xferFunc := ldm.makeDownloadFuncFromDownload(descriptor, existingDownload, topDownload)
+ defer topDownload.Transfer.Release(watcher)
+ topDownloadUncasted, watcher = ldm.tm.Transfer(transferKey, xferFunc, progressOutput)
+ topDownload = topDownloadUncasted.(*downloadTransfer)
+ continue
+ }
+
+ // Layer is not known to exist - download and register it.
+ progress.Update(progressOutput, descriptor.ID(), "Pulling fs layer") + + var xferFunc DoFunc + if topDownload != nil { + xferFunc = ldm.makeDownloadFunc(descriptor, "", topDownload) + defer topDownload.Transfer.Release(watcher) + } else { + xferFunc = ldm.makeDownloadFunc(descriptor, rootFS.ChainID(), nil) + } + topDownloadUncasted, watcher = ldm.tm.Transfer(transferKey, xferFunc, progressOutput) + topDownload = topDownloadUncasted.(*downloadTransfer) + downloadsByKey[key] = topDownload + } + + if topDownload == nil { + return rootFS, func() { + if topLayer != nil { + layer.ReleaseAndLog(ldm.layerStore, topLayer) + } + }, nil + } + + // Won't be using the list built up so far - will generate it + // from downloaded layers instead. + rootFS.DiffIDs = []layer.DiffID{} + + defer func() { + if topLayer != nil { + layer.ReleaseAndLog(ldm.layerStore, topLayer) + } + }() + + select { + case <-ctx.Done(): + topDownload.Transfer.Release(watcher) + return rootFS, func() {}, ctx.Err() + case <-topDownload.Done(): + break + } + + l, err := topDownload.result() + if err != nil { + topDownload.Transfer.Release(watcher) + return rootFS, func() {}, err + } + + // Must do this exactly len(layers) times, so we don't include the + // base layer on Windows. + for range layers { + if l == nil { + topDownload.Transfer.Release(watcher) + return rootFS, func() {}, errors.New("internal error: too few parent layers") + } + rootFS.DiffIDs = append([]layer.DiffID{l.DiffID()}, rootFS.DiffIDs...) + l = l.Parent() + } + return rootFS, func() { topDownload.Transfer.Release(watcher) }, err +} + +// makeDownloadFunc returns a function that performs the layer download and +// registration. If parentDownload is non-nil, it waits for that download to +// complete before the registration step, and registers the downloaded data +// on top of parentDownload's resulting layer. Otherwise, it registers the +// layer on top of the ChainID given by parentLayer. +func (ldm *LayerDownloadManager) makeDownloadFunc(descriptor DownloadDescriptor, parentLayer layer.ChainID, parentDownload *downloadTransfer) DoFunc { + return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { + d := &downloadTransfer{ + Transfer: NewTransfer(), + layerStore: ldm.layerStore, + } + + go func() { + defer func() { + close(progressChan) + }() + + progressOutput := progress.ChanOutput(progressChan) + + select { + case <-start: + default: + progress.Update(progressOutput, descriptor.ID(), "Waiting") + <-start + } + + if parentDownload != nil { + // Did the parent download already fail or get + // cancelled? + select { + case <-parentDownload.Done(): + _, err := parentDownload.result() + if err != nil { + d.err = err + return + } + default: + } + } + + var ( + downloadReader io.ReadCloser + size int64 + err error + retries int + ) + + defer descriptor.Close() + + for { + downloadReader, size, err = descriptor.Download(d.Transfer.Context(), progressOutput) + if err == nil { + break + } + + // If an error was returned because the context + // was cancelled, we shouldn't retry. 
+ select { + case <-d.Transfer.Context().Done(): + d.err = err + return + default: + } + + retries++ + if _, isDNR := err.(DoNotRetry); isDNR || retries == maxDownloadAttempts { + logrus.Errorf("Download failed: %v", err) + d.err = err + return + } + + logrus.Errorf("Download failed, retrying: %v", err) + delay := retries * 5 + ticker := time.NewTicker(time.Second) + + selectLoop: + for { + progress.Updatef(progressOutput, descriptor.ID(), "Retrying in %d second%s", delay, (map[bool]string{true: "s"})[delay != 1]) + select { + case <-ticker.C: + delay-- + if delay == 0 { + ticker.Stop() + break selectLoop + } + case <-d.Transfer.Context().Done(): + ticker.Stop() + d.err = errors.New("download cancelled during retry delay") + return + } + + } + } + + close(inactive) + + if parentDownload != nil { + select { + case <-d.Transfer.Context().Done(): + d.err = errors.New("layer registration cancelled") + downloadReader.Close() + return + case <-parentDownload.Done(): + } + + l, err := parentDownload.result() + if err != nil { + d.err = err + downloadReader.Close() + return + } + parentLayer = l.ChainID() + } + + reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(d.Transfer.Context(), downloadReader), progressOutput, size, descriptor.ID(), "Extracting") + defer reader.Close() + + inflatedLayerData, err := archive.DecompressStream(reader) + if err != nil { + d.err = fmt.Errorf("could not get decompression stream: %v", err) + return + } + + d.layer, err = d.layerStore.Register(inflatedLayerData, parentLayer) + if err != nil { + select { + case <-d.Transfer.Context().Done(): + d.err = errors.New("layer registration cancelled") + default: + d.err = fmt.Errorf("failed to register layer: %v", err) + } + return + } + + progress.Update(progressOutput, descriptor.ID(), "Pull complete") + withRegistered, hasRegistered := descriptor.(DownloadDescriptorWithRegistered) + if hasRegistered { + withRegistered.Registered(d.layer.DiffID()) + } + + // Doesn't actually need to be its own goroutine, but + // done like this so we can defer close(c). + go func() { + <-d.Transfer.Released() + if d.layer != nil { + layer.ReleaseAndLog(d.layerStore, d.layer) + } + }() + }() + + return d + } +} + +// makeDownloadFuncFromDownload returns a function that performs the layer +// registration when the layer data is coming from an existing download. It +// waits for sourceDownload and parentDownload to complete, and then +// reregisters the data from sourceDownload's top layer on top of +// parentDownload. This function does not log progress output because it would +// interfere with the progress reporting for sourceDownload, which has the same +// Key. +func (ldm *LayerDownloadManager) makeDownloadFuncFromDownload(descriptor DownloadDescriptor, sourceDownload *downloadTransfer, parentDownload *downloadTransfer) DoFunc { + return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { + d := &downloadTransfer{ + Transfer: NewTransfer(), + layerStore: ldm.layerStore, + } + + go func() { + defer func() { + close(progressChan) + }() + + <-start + + close(inactive) + + select { + case <-d.Transfer.Context().Done(): + d.err = errors.New("layer registration cancelled") + return + case <-parentDownload.Done(): + } + + l, err := parentDownload.result() + if err != nil { + d.err = err + return + } + parentLayer := l.ChainID() + + // sourceDownload should have already finished if + // parentDownload finished, but wait for it explicitly + // to be sure. 
+ select { + case <-d.Transfer.Context().Done(): + d.err = errors.New("layer registration cancelled") + return + case <-sourceDownload.Done(): + } + + l, err = sourceDownload.result() + if err != nil { + d.err = err + return + } + + layerReader, err := l.TarStream() + if err != nil { + d.err = err + return + } + defer layerReader.Close() + + d.layer, err = d.layerStore.Register(layerReader, parentLayer) + if err != nil { + d.err = fmt.Errorf("failed to register layer: %v", err) + return + } + + withRegistered, hasRegistered := descriptor.(DownloadDescriptorWithRegistered) + if hasRegistered { + withRegistered.Registered(d.layer.DiffID()) + } + + // Doesn't actually need to be its own goroutine, but + // done like this so we can defer close(c). + go func() { + <-d.Transfer.Released() + if d.layer != nil { + layer.ReleaseAndLog(d.layerStore, d.layer) + } + }() + }() + + return d + } +} diff --git a/vendor/github.com/docker/docker/distribution/xfer/transfer.go b/vendor/github.com/docker/docker/distribution/xfer/transfer.go new file mode 100644 index 00000000..dd83f8b8 --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/xfer/transfer.go @@ -0,0 +1,392 @@ +package xfer + +import ( + "runtime" + "sync" + + "github.com/docker/docker/pkg/progress" + "golang.org/x/net/context" +) + +// DoNotRetry is an error wrapper indicating that the error cannot be resolved +// with a retry. +type DoNotRetry struct { + Err error +} + +// Error returns the stringified representation of the encapsulated error. +func (e DoNotRetry) Error() string { + return e.Err.Error() +} + +// Watcher is returned by Watch and can be passed to Release to stop watching. +type Watcher struct { + // signalChan is used to signal to the watcher goroutine that + // new progress information is available, or that the transfer + // has finished. + signalChan chan struct{} + // releaseChan signals to the watcher goroutine that the watcher + // should be detached. + releaseChan chan struct{} + // running remains open as long as the watcher is watching the + // transfer. It gets closed if the transfer finishes or the + // watcher is detached. + running chan struct{} +} + +// Transfer represents an in-progress transfer. +type Transfer interface { + Watch(progressOutput progress.Output) *Watcher + Release(*Watcher) + Context() context.Context + Close() + Done() <-chan struct{} + Released() <-chan struct{} + Broadcast(masterProgressChan <-chan progress.Progress) +} + +type transfer struct { + mu sync.Mutex + + ctx context.Context + cancel context.CancelFunc + + // watchers keeps track of the goroutines monitoring progress output, + // indexed by the channels that release them. + watchers map[chan struct{}]*Watcher + + // lastProgress is the most recently received progress event. + lastProgress progress.Progress + // hasLastProgress is true when lastProgress has been set. + hasLastProgress bool + + // running remains open as long as the transfer is in progress. + running chan struct{} + // released stays open until all watchers release the transfer and + // the transfer is no longer tracked by the transfer manager. + released chan struct{} + + // broadcastDone is true if the master progress channel has closed. + broadcastDone bool + // closed is true if Close has been called + closed bool + // broadcastSyncChan allows watchers to "ping" the broadcasting + // goroutine to wait for it for deplete its input channel. This ensures + // a detaching watcher won't miss an event that was sent before it + // started detaching. 
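+ // (Broadcast services broadcastSyncChan only once masterProgressChan has
+ // been drained, which is what makes this handshake race-free.)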
+ broadcastSyncChan chan struct{} +} + +// NewTransfer creates a new transfer. +func NewTransfer() Transfer { + t := &transfer{ + watchers: make(map[chan struct{}]*Watcher), + running: make(chan struct{}), + released: make(chan struct{}), + broadcastSyncChan: make(chan struct{}), + } + + // This uses context.Background instead of a caller-supplied context + // so that a transfer won't be cancelled automatically if the client + // which requested it is ^C'd (there could be other viewers). + t.ctx, t.cancel = context.WithCancel(context.Background()) + + return t +} + +// Broadcast copies the progress and error output to all viewers. +func (t *transfer) Broadcast(masterProgressChan <-chan progress.Progress) { + for { + var ( + p progress.Progress + ok bool + ) + select { + case p, ok = <-masterProgressChan: + default: + // We've depleted the channel, so now we can handle + // reads on broadcastSyncChan to let detaching watchers + // know we're caught up. + select { + case <-t.broadcastSyncChan: + continue + case p, ok = <-masterProgressChan: + } + } + + t.mu.Lock() + if ok { + t.lastProgress = p + t.hasLastProgress = true + for _, w := range t.watchers { + select { + case w.signalChan <- struct{}{}: + default: + } + } + } else { + t.broadcastDone = true + } + t.mu.Unlock() + if !ok { + close(t.running) + return + } + } +} + +// Watch adds a watcher to the transfer. The supplied channel gets progress +// updates and is closed when the transfer finishes. +func (t *transfer) Watch(progressOutput progress.Output) *Watcher { + t.mu.Lock() + defer t.mu.Unlock() + + w := &Watcher{ + releaseChan: make(chan struct{}), + signalChan: make(chan struct{}), + running: make(chan struct{}), + } + + t.watchers[w.releaseChan] = w + + if t.broadcastDone { + close(w.running) + return w + } + + go func() { + defer func() { + close(w.running) + }() + var ( + done bool + lastWritten progress.Progress + hasLastWritten bool + ) + for { + t.mu.Lock() + hasLastProgress := t.hasLastProgress + lastProgress := t.lastProgress + t.mu.Unlock() + + // Make sure we don't write the last progress item + // twice. + if hasLastProgress && (!done || !hasLastWritten || lastProgress != lastWritten) { + progressOutput.WriteProgress(lastProgress) + lastWritten = lastProgress + hasLastWritten = true + } + + if done { + return + } + + select { + case <-w.signalChan: + case <-w.releaseChan: + done = true + // Since the watcher is going to detach, make + // sure the broadcaster is caught up so we + // don't miss anything. + select { + case t.broadcastSyncChan <- struct{}{}: + case <-t.running: + } + case <-t.running: + done = true + } + } + }() + + return w +} + +// Release is the inverse of Watch; indicating that the watcher no longer wants +// to be notified about the progress of the transfer. All calls to Watch must +// be paired with later calls to Release so that the lifecycle of the transfer +// is properly managed. +func (t *transfer) Release(watcher *Watcher) { + t.mu.Lock() + delete(t.watchers, watcher.releaseChan) + + if len(t.watchers) == 0 { + if t.closed { + // released may have been closed already if all + // watchers were released, then another one was added + // while waiting for a previous watcher goroutine to + // finish. 
+ select { + case <-t.released: + default: + close(t.released) + } + } else { + t.cancel() + } + } + t.mu.Unlock() + + close(watcher.releaseChan) + // Block until the watcher goroutine completes + <-watcher.running +} + +// Done returns a channel which is closed if the transfer completes or is +// cancelled. Note that having 0 watchers causes a transfer to be cancelled. +func (t *transfer) Done() <-chan struct{} { + // Note that this doesn't return t.ctx.Done() because that channel will + // be closed the moment Cancel is called, and we need to return a + // channel that blocks until a cancellation is actually acknowledged by + // the transfer function. + return t.running +} + +// Released returns a channel which is closed once all watchers release the +// transfer AND the transfer is no longer tracked by the transfer manager. +func (t *transfer) Released() <-chan struct{} { + return t.released +} + +// Context returns the context associated with the transfer. +func (t *transfer) Context() context.Context { + return t.ctx +} + +// Close is called by the transfer manager when the transfer is no longer +// being tracked. +func (t *transfer) Close() { + t.mu.Lock() + t.closed = true + if len(t.watchers) == 0 { + close(t.released) + } + t.mu.Unlock() +} + +// DoFunc is a function called by the transfer manager to actually perform +// a transfer. It should be non-blocking. It should wait until the start channel +// is closed before transferring any data. If the function closes inactive, that +// signals to the transfer manager that the job is no longer actively moving +// data - for example, it may be waiting for a dependent transfer to finish. +// This prevents it from taking up a slot. +type DoFunc func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer + +// TransferManager is used by LayerDownloadManager and LayerUploadManager to +// schedule and deduplicate transfers. It is up to the TransferManager +// implementation to make the scheduling and concurrency decisions. +type TransferManager interface { + // Transfer checks if a transfer with the given key is in progress. If + // so, it returns progress and error output from that transfer. + // Otherwise, it will call xferFunc to initiate the transfer. + Transfer(key string, xferFunc DoFunc, progressOutput progress.Output) (Transfer, *Watcher) +} + +type transferManager struct { + mu sync.Mutex + + concurrencyLimit int + activeTransfers int + transfers map[string]Transfer + waitingTransfers []chan struct{} +} + +// NewTransferManager returns a new TransferManager. +func NewTransferManager(concurrencyLimit int) TransferManager { + return &transferManager{ + concurrencyLimit: concurrencyLimit, + transfers: make(map[string]Transfer), + } +} + +// Transfer checks if a transfer matching the given key is in progress. If not, +// it starts one by calling xferFunc. The caller supplies a channel which +// receives progress output from the transfer. +func (tm *transferManager) Transfer(key string, xferFunc DoFunc, progressOutput progress.Output) (Transfer, *Watcher) { + tm.mu.Lock() + defer tm.mu.Unlock() + + for { + xfer, present := tm.transfers[key] + if !present { + break + } + // Transfer is already in progress. + watcher := xfer.Watch(progressOutput) + + select { + case <-xfer.Context().Done(): + // We don't want to watch a transfer that has been cancelled. + // Wait for it to be removed from the map and try again. 
+ xfer.Release(watcher)
+ tm.mu.Unlock()
+ // The goroutine that removes this transfer from the
+ // map is also waiting for xfer.Done(), so yield to it.
+ // This could be avoided by adding a Closed method
+ // to Transfer to allow explicitly waiting for it to be
+ // removed from the map, but forcing a scheduling round in
+ // this very rare case seems better than bloating the
+ // interface definition.
+ runtime.Gosched()
+ <-xfer.Done()
+ tm.mu.Lock()
+ default:
+ return xfer, watcher
+ }
+ }
+
+ start := make(chan struct{})
+ inactive := make(chan struct{})
+
+ if tm.activeTransfers < tm.concurrencyLimit {
+ close(start)
+ tm.activeTransfers++
+ } else {
+ tm.waitingTransfers = append(tm.waitingTransfers, start)
+ }
+
+ masterProgressChan := make(chan progress.Progress)
+ xfer := xferFunc(masterProgressChan, start, inactive)
+ watcher := xfer.Watch(progressOutput)
+ go xfer.Broadcast(masterProgressChan)
+ tm.transfers[key] = xfer
+
+ // When the transfer is finished, remove from the map.
+ go func() {
+ for {
+ select {
+ case <-inactive:
+ tm.mu.Lock()
+ tm.inactivate(start)
+ tm.mu.Unlock()
+ inactive = nil
+ case <-xfer.Done():
+ tm.mu.Lock()
+ if inactive != nil {
+ tm.inactivate(start)
+ }
+ delete(tm.transfers, key)
+ tm.mu.Unlock()
+ xfer.Close()
+ return
+ }
+ }
+ }()
+
+ return xfer, watcher
+}
+
+func (tm *transferManager) inactivate(start chan struct{}) {
+ // If the transfer was started, remove it from the activeTransfers
+ // count.
+ select {
+ case <-start:
+ // Start next transfer if any are waiting
+ if len(tm.waitingTransfers) != 0 {
+ close(tm.waitingTransfers[0])
+ tm.waitingTransfers = tm.waitingTransfers[1:]
+ } else {
+ tm.activeTransfers--
+ }
+ default:
+ }
+}
diff --git a/vendor/github.com/docker/docker/distribution/xfer/upload.go b/vendor/github.com/docker/docker/distribution/xfer/upload.go
new file mode 100644
index 00000000..563824c1
--- /dev/null
+++ b/vendor/github.com/docker/docker/distribution/xfer/upload.go
@@ -0,0 +1,163 @@
+package xfer
+
+import (
+ "errors"
+ "time"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/docker/distribution"
+ "github.com/docker/docker/layer"
+ "github.com/docker/docker/pkg/progress"
+ "golang.org/x/net/context"
+)
+
+const maxUploadAttempts = 5
+
+// LayerUploadManager provides task management and progress reporting for
+// uploads.
+type LayerUploadManager struct {
+ tm TransferManager
+}
+
+// NewLayerUploadManager returns a new LayerUploadManager.
+func NewLayerUploadManager(concurrencyLimit int) *LayerUploadManager {
+ return &LayerUploadManager{
+ tm: NewTransferManager(concurrencyLimit),
+ }
+}
+
+type uploadTransfer struct {
+ Transfer
+
+ remoteDescriptor distribution.Descriptor
+ err error
+}
+
+// An UploadDescriptor references a layer that may need to be uploaded.
+type UploadDescriptor interface {
+ // Key returns the key used to deduplicate uploads.
+ Key() string
+ // ID returns the ID for display purposes.
+ ID() string
+ // DiffID should return the DiffID for this layer.
+ DiffID() layer.DiffID
+ // Upload is called to perform the upload.
+ Upload(ctx context.Context, progressOutput progress.Output) (distribution.Descriptor, error)
+ // SetRemoteDescriptor provides the distribution.Descriptor that was
+ // returned by Upload. This descriptor is not to be confused with
+ // the UploadDescriptor interface, which is used for internally
+ // identifying layers that are being uploaded.
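+ // Upload copies this descriptor back onto every layer that shares the
+ // same Key once the deduplicated transfer finishes.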
+ SetRemoteDescriptor(descriptor distribution.Descriptor) +} + +// Upload is a blocking function which ensures the listed layers are present on +// the remote registry. It uses the string returned by the Key method to +// deduplicate uploads. +func (lum *LayerUploadManager) Upload(ctx context.Context, layers []UploadDescriptor, progressOutput progress.Output) error { + var ( + uploads []*uploadTransfer + dedupDescriptors = make(map[string]*uploadTransfer) + ) + + for _, descriptor := range layers { + progress.Update(progressOutput, descriptor.ID(), "Preparing") + + key := descriptor.Key() + if _, present := dedupDescriptors[key]; present { + continue + } + + xferFunc := lum.makeUploadFunc(descriptor) + upload, watcher := lum.tm.Transfer(descriptor.Key(), xferFunc, progressOutput) + defer upload.Release(watcher) + uploads = append(uploads, upload.(*uploadTransfer)) + dedupDescriptors[key] = upload.(*uploadTransfer) + } + + for _, upload := range uploads { + select { + case <-ctx.Done(): + return ctx.Err() + case <-upload.Transfer.Done(): + if upload.err != nil { + return upload.err + } + } + } + for _, l := range layers { + l.SetRemoteDescriptor(dedupDescriptors[l.Key()].remoteDescriptor) + } + + return nil +} + +func (lum *LayerUploadManager) makeUploadFunc(descriptor UploadDescriptor) DoFunc { + return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { + u := &uploadTransfer{ + Transfer: NewTransfer(), + } + + go func() { + defer func() { + close(progressChan) + }() + + progressOutput := progress.ChanOutput(progressChan) + + select { + case <-start: + default: + progress.Update(progressOutput, descriptor.ID(), "Waiting") + <-start + } + + retries := 0 + for { + remoteDescriptor, err := descriptor.Upload(u.Transfer.Context(), progressOutput) + if err == nil { + u.remoteDescriptor = remoteDescriptor + break + } + + // If an error was returned because the context + // was cancelled, we shouldn't retry. + select { + case <-u.Transfer.Context().Done(): + u.err = err + return + default: + } + + retries++ + if _, isDNR := err.(DoNotRetry); isDNR || retries == maxUploadAttempts { + logrus.Errorf("Upload failed: %v", err) + u.err = err + return + } + + logrus.Errorf("Upload failed, retrying: %v", err) + delay := retries * 5 + ticker := time.NewTicker(time.Second) + + selectLoop: + for { + progress.Updatef(progressOutput, descriptor.ID(), "Retrying in %d second%s", delay, (map[bool]string{true: "s"})[delay != 1]) + select { + case <-ticker.C: + delay-- + if delay == 0 { + ticker.Stop() + break selectLoop + } + case <-u.Transfer.Context().Done(): + ticker.Stop() + u.err = errors.New("upload cancelled during retry delay") + return + } + } + } + }() + + return u + } +} diff --git a/vendor/github.com/docker/docker/docker/README.md b/vendor/github.com/docker/docker/docker/README.md new file mode 100644 index 00000000..015bc133 --- /dev/null +++ b/vendor/github.com/docker/docker/docker/README.md @@ -0,0 +1,3 @@ +docker.go contains Docker's main function. + +This file provides first line CLI argument parsing and environment variable setting. 
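Both makeDownloadFunc (download.go above) and makeUploadFunc (upload.go above) share one retry shape: linear backoff of attempt*5 seconds, a one-second ticker that counts the delay down so a "Retrying in Ns" update can be emitted each second, cancellation checked between ticks, and DoNotRetry short-circuiting the loop. A stripped-down, self-contained sketch of that shape follows; it is illustrative only (retryWithCountdown and doNotRetry are hypothetical names, progress reporting is omitted, and a plain channel stands in for the transfer context):

package main

import (
	"errors"
	"fmt"
	"time"
)

// doNotRetry mirrors xfer.DoNotRetry: it marks an error as fatal so the
// retry loop gives up immediately.
type doNotRetry struct{ err error }

func (e doNotRetry) Error() string { return e.err.Error() }

// retryWithCountdown retries op with the policy used by the vendored
// transfer code: at most maxAttempts tries, a delay of attempt*5 seconds
// after each failure, the delay counted down one second at a time, and a
// prompt abort if cancel fires during the wait.
func retryWithCountdown(maxAttempts int, cancel <-chan struct{}, op func() error) error {
	for attempt := 1; ; attempt++ {
		err := op()
		if err == nil {
			return nil
		}
		if _, fatal := err.(doNotRetry); fatal || attempt == maxAttempts {
			return err
		}
		delay := attempt * 5 // seconds; grows linearly with each attempt
		ticker := time.NewTicker(time.Second)
		for delay > 0 {
			select {
			case <-ticker.C:
				delay-- // a real caller would emit "Retrying in Ns" here
			case <-cancel:
				ticker.Stop()
				return errors.New("cancelled during retry delay")
			}
		}
		ticker.Stop()
	}
}

func main() {
	attempts := 0
	err := retryWithCountdown(5, nil, func() error {
		attempts++
		if attempts == 1 {
			return errors.New("transient failure")
		}
		return nil
	})
	fmt.Printf("err=%v after %d attempt(s)\n", err, attempts)
}

The per-second ticker, rather than a single time.Sleep, is what lets the real code both refresh the countdown message and react to cancellation in the middle of the wait.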
diff --git a/vendor/github.com/docker/docker/docker/client.go b/vendor/github.com/docker/docker/docker/client.go new file mode 100644 index 00000000..487d4458 --- /dev/null +++ b/vendor/github.com/docker/docker/docker/client.go @@ -0,0 +1,33 @@ +package docker + +import ( + "path/filepath" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cliconfig" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/utils" +) + +var clientFlags = &cli.ClientFlags{FlagSet: new(flag.FlagSet), Common: commonFlags} + +func init() { + client := clientFlags.FlagSet + client.StringVar(&clientFlags.ConfigDir, []string{"-config"}, cliconfig.ConfigDir(), "Location of client config files") + + clientFlags.PostParse = func() { + clientFlags.Common.PostParse() + + if clientFlags.ConfigDir != "" { + cliconfig.SetConfigDir(clientFlags.ConfigDir) + } + + if clientFlags.Common.TrustKey == "" { + clientFlags.Common.TrustKey = filepath.Join(cliconfig.ConfigDir(), defaultTrustKeyFile) + } + + if clientFlags.Common.Debug { + utils.EnableDebug() + } + } +} diff --git a/vendor/github.com/docker/docker/docker/common.go b/vendor/github.com/docker/docker/docker/common.go new file mode 100644 index 00000000..1615ef25 --- /dev/null +++ b/vendor/github.com/docker/docker/docker/common.go @@ -0,0 +1,100 @@ +package docker + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/cli" + "github.com/docker/docker/cliconfig" + "github.com/docker/docker/opts" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/go-connections/tlsconfig" +) + +const ( + defaultTrustKeyFile = "key.json" + defaultCaFile = "ca.pem" + defaultKeyFile = "key.pem" + defaultCertFile = "cert.pem" + tlsVerifyKey = "tlsverify" +) + +var ( + commonFlags = &cli.CommonFlags{FlagSet: new(flag.FlagSet)} + + dockerCertPath = os.Getenv("DOCKER_CERT_PATH") + dockerTLSVerify = os.Getenv("DOCKER_TLS_VERIFY") != "" +) + +func init() { + if dockerCertPath == "" { + dockerCertPath = cliconfig.ConfigDir() + } + + commonFlags.PostParse = postParseCommon + + cmd := commonFlags.FlagSet + + cmd.BoolVar(&commonFlags.Debug, []string{"D", "-debug"}, false, "Enable debug mode") + cmd.StringVar(&commonFlags.LogLevel, []string{"l", "-log-level"}, "info", "Set the logging level") + cmd.BoolVar(&commonFlags.TLS, []string{"-tls"}, false, "Use TLS; implied by --tlsverify") + cmd.BoolVar(&commonFlags.TLSVerify, []string{"-tlsverify"}, dockerTLSVerify, "Use TLS and verify the remote") + + // TODO use flag flag.String([]string{"i", "-identity"}, "", "Path to libtrust key file") + + var tlsOptions tlsconfig.Options + commonFlags.TLSOptions = &tlsOptions + cmd.StringVar(&tlsOptions.CAFile, []string{"-tlscacert"}, filepath.Join(dockerCertPath, defaultCaFile), "Trust certs signed only by this CA") + cmd.StringVar(&tlsOptions.CertFile, []string{"-tlscert"}, filepath.Join(dockerCertPath, defaultCertFile), "Path to TLS certificate file") + cmd.StringVar(&tlsOptions.KeyFile, []string{"-tlskey"}, filepath.Join(dockerCertPath, defaultKeyFile), "Path to TLS key file") + + cmd.Var(opts.NewNamedListOptsRef("hosts", &commonFlags.Hosts, opts.ValidateHost), []string{"H", "-host"}, "Daemon socket(s) to connect to") +} + +func postParseCommon() { + cmd := commonFlags.FlagSet + + setDaemonLogLevel(commonFlags.LogLevel) + + // Regardless of whether the user sets it to true or false, if they + // specify --tlsverify at all then we need to turn on tls + // TLSVerify can be true even if not set due to DOCKER_TLS_VERIFY env var, 
so we need to check that here as well + if cmd.IsSet("-"+tlsVerifyKey) || commonFlags.TLSVerify { + commonFlags.TLS = true + } + + if !commonFlags.TLS { + commonFlags.TLSOptions = nil + } else { + tlsOptions := commonFlags.TLSOptions + tlsOptions.InsecureSkipVerify = !commonFlags.TLSVerify + + // Reset CertFile and KeyFile to empty string if the user did not specify + // the respective flags and the respective default files were not found. + if !cmd.IsSet("-tlscert") { + if _, err := os.Stat(tlsOptions.CertFile); os.IsNotExist(err) { + tlsOptions.CertFile = "" + } + } + if !cmd.IsSet("-tlskey") { + if _, err := os.Stat(tlsOptions.KeyFile); os.IsNotExist(err) { + tlsOptions.KeyFile = "" + } + } + } +} + +func setDaemonLogLevel(logLevel string) { + if logLevel != "" { + lvl, err := logrus.ParseLevel(logLevel) + if err != nil { + fmt.Fprintf(os.Stderr, "Unable to parse logging level: %s\n", logLevel) + os.Exit(1) + } + logrus.SetLevel(lvl) + } else { + logrus.SetLevel(logrus.InfoLevel) + } +} diff --git a/vendor/github.com/docker/docker/docker/daemon.go b/vendor/github.com/docker/docker/docker/daemon.go new file mode 100644 index 00000000..7a0b53f4 --- /dev/null +++ b/vendor/github.com/docker/docker/docker/daemon.go @@ -0,0 +1,417 @@ +// +build daemon + +package docker + +import ( + "crypto/tls" + "fmt" + "io" + "os" + "path/filepath" + "runtime" + "strings" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/uuid" + apiserver "github.com/docker/docker/api/server" + "github.com/docker/docker/api/server/router" + "github.com/docker/docker/api/server/router/build" + "github.com/docker/docker/api/server/router/container" + "github.com/docker/docker/api/server/router/image" + systemrouter "github.com/docker/docker/api/server/router/system" + "github.com/docker/docker/api/server/router/volume" + "github.com/docker/docker/builder/dockerfile" + "github.com/docker/docker/cli" + "github.com/docker/docker/cliconfig" + "github.com/docker/docker/daemon" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/docker/listeners" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/libcontainerd" + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/jsonlog" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/pidfile" + "github.com/docker/docker/pkg/signal" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/registry" + "github.com/docker/docker/utils" + "github.com/docker/go-connections/tlsconfig" +) + +const ( + daemonUsage = " docker daemon [ --help | ... ]\n" + daemonConfigFileFlag = "-config-file" +) + +var ( + daemonCli cli.Handler = NewDaemonCli() +) + +// DaemonCli represents the daemon CLI. +type DaemonCli struct { + *daemon.Config + flags *flag.FlagSet +} + +func presentInHelp(usage string) string { return usage } +func absentFromHelp(string) string { return "" } + +// NewDaemonCli returns a pre-configured daemon CLI +func NewDaemonCli() *DaemonCli { + daemonFlags := cli.Subcmd("daemon", nil, "Enable daemon mode", true) + + // TODO(tiborvass): remove InstallFlags? 
+ daemonConfig := new(daemon.Config) + daemonConfig.LogConfig.Config = make(map[string]string) + daemonConfig.ClusterOpts = make(map[string]string) + + if runtime.GOOS != "linux" { + daemonConfig.V2Only = true + } + + daemonConfig.InstallFlags(daemonFlags, presentInHelp) + daemonConfig.InstallFlags(flag.CommandLine, absentFromHelp) + daemonFlags.Require(flag.Exact, 0) + + return &DaemonCli{ + Config: daemonConfig, + flags: daemonFlags, + } +} + +func migrateKey() (err error) { + // Migrate trust key if exists at ~/.docker/key.json and owned by current user + oldPath := filepath.Join(cliconfig.ConfigDir(), defaultTrustKeyFile) + newPath := filepath.Join(getDaemonConfDir(), defaultTrustKeyFile) + if _, statErr := os.Stat(newPath); os.IsNotExist(statErr) && currentUserIsOwner(oldPath) { + defer func() { + // Ensure old path is removed if no error occurred + if err == nil { + err = os.Remove(oldPath) + } else { + logrus.Warnf("Key migration failed, key file not removed at %s", oldPath) + os.Remove(newPath) + } + }() + + if err := system.MkdirAll(getDaemonConfDir(), os.FileMode(0644)); err != nil { + return fmt.Errorf("Unable to create daemon configuration directory: %s", err) + } + + newFile, err := os.OpenFile(newPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600) + if err != nil { + return fmt.Errorf("error creating key file %q: %s", newPath, err) + } + defer newFile.Close() + + oldFile, err := os.Open(oldPath) + if err != nil { + return fmt.Errorf("error opening key file %q: %s", oldPath, err) + } + defer oldFile.Close() + + if _, err := io.Copy(newFile, oldFile); err != nil { + return fmt.Errorf("error copying key: %s", err) + } + + logrus.Infof("Migrated key from %s to %s", oldPath, newPath) + } + + return nil +} + +func getGlobalFlag() (globalFlag *flag.Flag) { + defer func() { + if x := recover(); x != nil { + switch f := x.(type) { + case *flag.Flag: + globalFlag = f + default: + panic(x) + } + } + }() + visitor := func(f *flag.Flag) { panic(f) } + commonFlags.FlagSet.Visit(visitor) + clientFlags.FlagSet.Visit(visitor) + return +} + +// CmdDaemon is the daemon command, called the raw arguments after `docker daemon`. 
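+// Roughly: it merges the legacy global flags, loads the optional daemon
+// configuration file, sets up logging and the PID file, builds the API
+// server and its listeners, starts libcontainerd and the daemon itself,
+// and then blocks until the API server exits or a signal arrives.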
+func (cli *DaemonCli) CmdDaemon(args ...string) error { + // warn from uuid package when running the daemon + uuid.Loggerf = logrus.Warnf + + if !commonFlags.FlagSet.IsEmpty() || !clientFlags.FlagSet.IsEmpty() { + // deny `docker -D daemon` + illegalFlag := getGlobalFlag() + fmt.Fprintf(os.Stderr, "invalid flag '-%s'.\nSee 'docker daemon --help'.\n", illegalFlag.Names[0]) + os.Exit(1) + } else { + // allow new form `docker daemon -D` + flag.Merge(cli.flags, commonFlags.FlagSet) + } + + configFile := cli.flags.String([]string{daemonConfigFileFlag}, defaultDaemonConfigFile, "Daemon configuration file") + + cli.flags.ParseFlags(args, true) + commonFlags.PostParse() + + if commonFlags.TrustKey == "" { + commonFlags.TrustKey = filepath.Join(getDaemonConfDir(), defaultTrustKeyFile) + } + cliConfig, err := loadDaemonCliConfig(cli.Config, cli.flags, commonFlags, *configFile) + if err != nil { + fmt.Fprint(os.Stderr, err) + os.Exit(1) + } + cli.Config = cliConfig + + if cli.Config.Debug { + utils.EnableDebug() + } + + if utils.ExperimentalBuild() { + logrus.Warn("Running experimental build") + } + + logrus.SetFormatter(&logrus.TextFormatter{ + TimestampFormat: jsonlog.RFC3339NanoFixed, + DisableColors: cli.Config.RawLogs, + }) + + if err := setDefaultUmask(); err != nil { + logrus.Fatalf("Failed to set umask: %v", err) + } + + if len(cli.LogConfig.Config) > 0 { + if err := logger.ValidateLogOpts(cli.LogConfig.Type, cli.LogConfig.Config); err != nil { + logrus.Fatalf("Failed to set log opts: %v", err) + } + } + + var pfile *pidfile.PIDFile + if cli.Pidfile != "" { + pf, err := pidfile.New(cli.Pidfile) + if err != nil { + logrus.Fatalf("Error starting daemon: %v", err) + } + pfile = pf + defer func() { + if err := pfile.Remove(); err != nil { + logrus.Error(err) + } + }() + } + + serverConfig := &apiserver.Config{ + AuthorizationPluginNames: cli.Config.AuthorizationPlugins, + Logging: true, + SocketGroup: cli.Config.SocketGroup, + Version: dockerversion.Version, + } + serverConfig = setPlatformServerConfig(serverConfig, cli.Config) + + if cli.Config.TLS { + tlsOptions := tlsconfig.Options{ + CAFile: cli.Config.CommonTLSOptions.CAFile, + CertFile: cli.Config.CommonTLSOptions.CertFile, + KeyFile: cli.Config.CommonTLSOptions.KeyFile, + } + + if cli.Config.TLSVerify { + // server requires and verifies client's certificate + tlsOptions.ClientAuth = tls.RequireAndVerifyClientCert + } + tlsConfig, err := tlsconfig.Server(tlsOptions) + if err != nil { + logrus.Fatal(err) + } + serverConfig.TLSConfig = tlsConfig + } + + if len(cli.Config.Hosts) == 0 { + cli.Config.Hosts = make([]string, 1) + } + + api := apiserver.New(serverConfig) + + for i := 0; i < len(cli.Config.Hosts); i++ { + var err error + if cli.Config.Hosts[i], err = opts.ParseHost(cli.Config.TLS, cli.Config.Hosts[i]); err != nil { + logrus.Fatalf("error parsing -H %s : %v", cli.Config.Hosts[i], err) + } + + protoAddr := cli.Config.Hosts[i] + protoAddrParts := strings.SplitN(protoAddr, "://", 2) + if len(protoAddrParts) != 2 { + logrus.Fatalf("bad format %s, expected PROTO://ADDR", protoAddr) + } + l, err := listeners.Init(protoAddrParts[0], protoAddrParts[1], serverConfig.SocketGroup, serverConfig.TLSConfig) + if err != nil { + logrus.Fatal(err) + } + + logrus.Debugf("Listener created for HTTP on %s (%s)", protoAddrParts[0], protoAddrParts[1]) + api.Accept(protoAddrParts[1], l...) 
+ } + + if err := migrateKey(); err != nil { + logrus.Fatal(err) + } + cli.TrustKeyPath = commonFlags.TrustKey + + registryService := registry.NewService(cli.Config.ServiceOptions) + + containerdRemote, err := libcontainerd.New(filepath.Join(cli.Config.ExecRoot, "libcontainerd"), cli.getPlatformRemoteOptions()...) + if err != nil { + logrus.Fatal(err) + } + + d, err := daemon.NewDaemon(cli.Config, registryService, containerdRemote) + if err != nil { + if pfile != nil { + if err := pfile.Remove(); err != nil { + logrus.Error(err) + } + } + logrus.Fatalf("Error starting daemon: %v", err) + } + + logrus.Info("Daemon has completed initialization") + + logrus.WithFields(logrus.Fields{ + "version": dockerversion.Version, + "commit": dockerversion.GitCommit, + "graphdriver": d.GraphDriverName(), + }).Info("Docker daemon") + + initRouter(api, d) + + reload := func(config *daemon.Config) { + if err := d.Reload(config); err != nil { + logrus.Errorf("Error reconfiguring the daemon: %v", err) + return + } + if config.IsValueSet("debug") { + debugEnabled := utils.IsDebugEnabled() + switch { + case debugEnabled && !config.Debug: // disable debug + utils.DisableDebug() + api.DisableProfiler() + case config.Debug && !debugEnabled: // enable debug + utils.EnableDebug() + api.EnableProfiler() + } + + } + } + + setupConfigReloadTrap(*configFile, cli.flags, reload) + + // The serve API routine never exits unless an error occurs + // We need to start it as a goroutine and wait on it so + // daemon doesn't exit + serveAPIWait := make(chan error) + go api.Wait(serveAPIWait) + + signal.Trap(func() { + api.Close() + <-serveAPIWait + shutdownDaemon(d, 15) + if pfile != nil { + if err := pfile.Remove(); err != nil { + logrus.Error(err) + } + } + }) + + // after the daemon is done setting up we can notify systemd api + notifySystem() + + // Daemon is fully initialized and handling API traffic + // Wait for serve API to complete + errAPI := <-serveAPIWait + shutdownDaemon(d, 15) + containerdRemote.Cleanup() + if errAPI != nil { + if pfile != nil { + if err := pfile.Remove(); err != nil { + logrus.Error(err) + } + } + logrus.Fatalf("Shutting down due to ServeAPI error: %v", errAPI) + } + return nil +} + +// shutdownDaemon just wraps daemon.Shutdown() to handle a timeout in case +// d.Shutdown() is waiting too long to kill container or worst it's +// blocked there +func shutdownDaemon(d *daemon.Daemon, timeout time.Duration) { + ch := make(chan struct{}) + go func() { + d.Shutdown() + close(ch) + }() + select { + case <-ch: + logrus.Debug("Clean shutdown succeeded") + case <-time.After(timeout * time.Second): + logrus.Error("Force shutdown daemon") + } +} + +func loadDaemonCliConfig(config *daemon.Config, daemonFlags *flag.FlagSet, commonConfig *cli.CommonFlags, configFile string) (*daemon.Config, error) { + config.Debug = commonConfig.Debug + config.Hosts = commonConfig.Hosts + config.LogLevel = commonConfig.LogLevel + config.TLS = commonConfig.TLS + config.TLSVerify = commonConfig.TLSVerify + config.CommonTLSOptions = daemon.CommonTLSOptions{} + + if commonConfig.TLSOptions != nil { + config.CommonTLSOptions.CAFile = commonConfig.TLSOptions.CAFile + config.CommonTLSOptions.CertFile = commonConfig.TLSOptions.CertFile + config.CommonTLSOptions.KeyFile = commonConfig.TLSOptions.KeyFile + } + + if configFile != "" { + c, err := daemon.MergeDaemonConfigurations(config, daemonFlags, configFile) + if err != nil { + if daemonFlags.IsSet(daemonConfigFileFlag) || !os.IsNotExist(err) { + return nil, fmt.Errorf("unable to 
configure the Docker daemon with file %s: %v\n", configFile, err)
+ }
+ }
+ // the merged configuration can be nil if the config file didn't exist.
+ // leave the current configuration as it is when that happens.
+ if c != nil {
+ config = c
+ }
+ }
+
+ // Regardless of whether the user sets it to true or false, if they
+ // specify TLSVerify at all then we need to turn on TLS
+ if config.IsValueSet(tlsVerifyKey) {
+ config.TLS = true
+ }
+
+ // ensure that the log level is the one set after merging configurations
+ setDaemonLogLevel(config.LogLevel)
+
+ return config, nil
+}
+
+func initRouter(s *apiserver.Server, d *daemon.Daemon) {
+ routers := []router.Router{
+ container.NewRouter(d),
+ image.NewRouter(d),
+ systemrouter.NewRouter(d),
+ volume.NewRouter(d),
+ build.NewRouter(dockerfile.NewBuildManager(d)),
+ }
+
+ s.InitRouter(utils.IsDebugEnabled(), routers...)
+}
diff --git a/vendor/github.com/docker/docker/docker/daemon_freebsd.go b/vendor/github.com/docker/docker/docker/daemon_freebsd.go
new file mode 100644
index 00000000..013f0e91
--- /dev/null
+++ b/vendor/github.com/docker/docker/docker/daemon_freebsd.go
@@ -0,0 +1,7 @@
+// +build daemon
+
+package docker
+
+// notifySystem sends a message to the host when the server is ready to be used
+func notifySystem() {
+}
diff --git a/vendor/github.com/docker/docker/docker/daemon_linux.go b/vendor/github.com/docker/docker/docker/daemon_linux.go
new file mode 100644
index 00000000..3ff9a49e
--- /dev/null
+++ b/vendor/github.com/docker/docker/docker/daemon_linux.go
@@ -0,0 +1,13 @@
+// +build daemon
+
+package docker
+
+import (
+ systemdDaemon "github.com/coreos/go-systemd/daemon"
+)
+
+// notifySystem sends a message to the host when the server is ready to be used
+func notifySystem() {
+ // Tell the init daemon we are accepting requests
+ go systemdDaemon.SdNotify("READY=1")
+}
diff --git a/vendor/github.com/docker/docker/docker/daemon_none.go b/vendor/github.com/docker/docker/docker/daemon_none.go
new file mode 100644
index 00000000..b2cbfd41
--- /dev/null
+++ b/vendor/github.com/docker/docker/docker/daemon_none.go
@@ -0,0 +1,13 @@
+// +build !daemon
+
+package docker
+
+import "github.com/docker/docker/cli"
+
+const daemonUsage = ""
+
+var daemonCli cli.Handler
+
+// notifySystem sends a message to the host when the server is ready to be used
+func notifySystem() {
+}
diff --git a/vendor/github.com/docker/docker/docker/daemon_unix.go b/vendor/github.com/docker/docker/docker/daemon_unix.go
new file mode 100644
index 00000000..8e212f8e
--- /dev/null
+++ b/vendor/github.com/docker/docker/docker/daemon_unix.go
@@ -0,0 +1,82 @@
+// +build daemon,!windows
+
+package docker
+
+import (
+ "fmt"
+ "os"
+ "os/signal"
+ "syscall"
+
+ "github.com/Sirupsen/logrus"
+ apiserver "github.com/docker/docker/api/server"
+ "github.com/docker/docker/daemon"
+ "github.com/docker/docker/libcontainerd"
+ "github.com/docker/docker/pkg/mflag"
+ "github.com/docker/docker/pkg/system"
+)
+
+const defaultDaemonConfigFile = "/etc/docker/daemon.json"
+
+func setPlatformServerConfig(serverConfig *apiserver.Config, daemonCfg *daemon.Config) *apiserver.Config {
+ serverConfig.EnableCors = daemonCfg.EnableCors
+ serverConfig.CorsHeaders = daemonCfg.CorsHeaders
+
+ return serverConfig
+}
+
+// currentUserIsOwner checks whether the current user is the owner of the given
+// file.
+func currentUserIsOwner(f string) bool { + if fileInfo, err := system.Stat(f); err == nil && fileInfo != nil { + if int(fileInfo.UID()) == os.Getuid() { + return true + } + } + return false +} + +// setDefaultUmask sets the umask to 0022 to avoid problems +// caused by custom umask +func setDefaultUmask() error { + desiredUmask := 0022 + syscall.Umask(desiredUmask) + if umask := syscall.Umask(desiredUmask); umask != desiredUmask { + return fmt.Errorf("failed to set umask: expected %#o, got %#o", desiredUmask, umask) + } + + return nil +} + +func getDaemonConfDir() string { + return "/etc/docker" +} + +// setupConfigReloadTrap configures the USR2 signal to reload the configuration. +func setupConfigReloadTrap(configFile string, flags *mflag.FlagSet, reload func(*daemon.Config)) { + c := make(chan os.Signal, 1) + signal.Notify(c, syscall.SIGHUP) + go func() { + for range c { + if err := daemon.ReloadConfiguration(configFile, flags, reload); err != nil { + logrus.Error(err) + } + } + }() +} + +func (cli *DaemonCli) getPlatformRemoteOptions() []libcontainerd.RemoteOption { + opts := []libcontainerd.RemoteOption{ + libcontainerd.WithDebugLog(cli.Config.Debug), + } + if cli.Config.ContainerdAddr != "" { + opts = append(opts, libcontainerd.WithRemoteAddr(cli.Config.ContainerdAddr)) + } else { + opts = append(opts, libcontainerd.WithStartDaemon(true)) + } + if daemon.UsingSystemd(cli.Config) { + args := []string{"--systemd-cgroup=true"} + opts = append(opts, libcontainerd.WithRuntimeArgs(args)) + } + return opts +} diff --git a/vendor/github.com/docker/docker/docker/docker.go b/vendor/github.com/docker/docker/docker/docker.go new file mode 100644 index 00000000..cbb1e4cf --- /dev/null +++ b/vendor/github.com/docker/docker/docker/docker.go @@ -0,0 +1,77 @@ +package docker + +import ( + "fmt" + "os" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/client" + "github.com/docker/docker/cli" + "github.com/docker/docker/dockerversion" + flag "github.com/docker/docker/pkg/mflag" + "github.com/docker/docker/pkg/term" + "github.com/docker/docker/utils" +) + +func Main() { + // Set terminal emulation based on platform as required. + stdin, stdout, stderr := term.StdStreams() + + logrus.SetOutput(stderr) + + flag.Merge(flag.CommandLine, clientFlags.FlagSet, commonFlags.FlagSet) + + flag.Usage = func() { + fmt.Fprint(stdout, "Usage: docker [OPTIONS] COMMAND [arg...]\n"+daemonUsage+" docker [ --help | -v | --version ]\n\n") + fmt.Fprint(stdout, "A self-sufficient runtime for containers.\n\nOptions:\n") + + flag.CommandLine.SetOutput(stdout) + flag.PrintDefaults() + + help := "\nCommands:\n" + + for _, cmd := range dockerCommands { + help += fmt.Sprintf(" %-10.10s%s\n", cmd.Name, cmd.Description) + } + + help += "\nRun 'docker COMMAND --help' for more information on a command." + fmt.Fprintf(stdout, "%s\n", help) + } + + flag.Parse() + + if *flVersion { + showVersion() + return + } + + if *flHelp { + // if global flag --help is present, regardless of what other options and commands there are, + // just print the usage. 
+ flag.Usage() + return + } + + clientCli := client.NewDockerCli(stdin, stdout, stderr, clientFlags) + + c := cli.New(clientCli, daemonCli) + if err := c.Run(flag.Args()...); err != nil { + if sterr, ok := err.(cli.StatusError); ok { + if sterr.Status != "" { + fmt.Fprintln(stderr, sterr.Status) + os.Exit(1) + } + os.Exit(sterr.StatusCode) + } + fmt.Fprintln(stderr, err) + os.Exit(1) + } +} + +func showVersion() { + if utils.ExperimentalBuild() { + fmt.Printf("Docker version %s, build %s, experimental\n", dockerversion.Version, dockerversion.GitCommit) + } else { + fmt.Printf("Docker version %s, build %s\n", dockerversion.Version, dockerversion.GitCommit) + } +} diff --git a/vendor/github.com/docker/docker/docker/flags.go b/vendor/github.com/docker/docker/docker/flags.go new file mode 100644 index 00000000..fdff7b47 --- /dev/null +++ b/vendor/github.com/docker/docker/docker/flags.go @@ -0,0 +1,30 @@ +package docker + +import ( + "sort" + + "github.com/docker/docker/cli" + flag "github.com/docker/docker/pkg/mflag" +) + +var ( + flHelp = flag.Bool([]string{"h", "-help"}, false, "Print usage") + flVersion = flag.Bool([]string{"v", "-version"}, false, "Print version information and quit") +) + +type byName []cli.Command + +func (a byName) Len() int { return len(a) } +func (a byName) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byName) Less(i, j int) bool { return a[i].Name < a[j].Name } + +var dockerCommands []cli.Command + +// TODO(tiborvass): do not show 'daemon' on client-only binaries + +func init() { + for _, cmd := range cli.DockerCommands { + dockerCommands = append(dockerCommands, cmd) + } + sort.Sort(byName(dockerCommands)) +} diff --git a/vendor/github.com/docker/docker/docker/listeners/listeners.go b/vendor/github.com/docker/docker/docker/listeners/listeners.go new file mode 100644 index 00000000..6ddac0da --- /dev/null +++ b/vendor/github.com/docker/docker/docker/listeners/listeners.go @@ -0,0 +1,19 @@ +package listeners + +import ( + "crypto/tls" + "net" + + "github.com/Sirupsen/logrus" + "github.com/docker/go-connections/sockets" +) + +func initTCPSocket(addr string, tlsConfig *tls.Config) (l net.Listener, err error) { + if tlsConfig == nil || tlsConfig.ClientAuth != tls.RequireAndVerifyClientCert { + logrus.Warn("/!\\ DON'T BIND ON ANY IP ADDRESS WITHOUT setting -tlsverify IF YOU DON'T KNOW WHAT YOU'RE DOING /!\\") + } + if l, err = sockets.NewTCPSocket(addr, tlsConfig); err != nil { + return nil, err + } + return +} diff --git a/vendor/github.com/docker/docker/docker/listeners/listeners_unix.go b/vendor/github.com/docker/docker/docker/listeners/listeners_unix.go new file mode 100644 index 00000000..548cb78e --- /dev/null +++ b/vendor/github.com/docker/docker/docker/listeners/listeners_unix.go @@ -0,0 +1,89 @@ +// +build !windows + +package listeners + +import ( + "crypto/tls" + "fmt" + "net" + "strconv" + + "github.com/Sirupsen/logrus" + "github.com/coreos/go-systemd/activation" + "github.com/docker/go-connections/sockets" +) + +// Init creates new listeners for the server. 
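+// Supported protocols are "fd" (systemd socket activation), "tcp", and
+// "unix"; any other value is rejected.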
+func Init(proto, addr, socketGroup string, tlsConfig *tls.Config) (ls []net.Listener, err error) { + switch proto { + case "fd": + ls, err = listenFD(addr, tlsConfig) + if err != nil { + return nil, err + } + case "tcp": + l, err := initTCPSocket(addr, tlsConfig) + if err != nil { + return nil, err + } + ls = append(ls, l) + case "unix": + l, err := sockets.NewUnixSocket(addr, socketGroup) + if err != nil { + return nil, fmt.Errorf("can't create unix socket %s: %v", addr, err) + } + ls = append(ls, l) + default: + return nil, fmt.Errorf("Invalid protocol format: %q", proto) + } + + return +} + +// listenFD returns the specified socket activated files as a slice of +// net.Listeners or all of the activated files if "*" is given. +func listenFD(addr string, tlsConfig *tls.Config) ([]net.Listener, error) { + var ( + err error + listeners []net.Listener + ) + // socket activation + if tlsConfig != nil { + listeners, err = activation.TLSListeners(false, tlsConfig) + } else { + listeners, err = activation.Listeners(false) + } + if err != nil { + return nil, err + } + + if len(listeners) == 0 { + return nil, fmt.Errorf("No sockets found. Make sure the docker daemon was started by systemd.") + } + + // default to all fds just like unix:// and tcp:// + if addr == "" || addr == "*" { + return listeners, nil + } + + fdNum, err := strconv.Atoi(addr) + if err != nil { + return nil, fmt.Errorf("failed to parse systemd address, should be number: %v", err) + } + fdOffset := fdNum - 3 + if len(listeners) < int(fdOffset)+1 { + return nil, fmt.Errorf("Too few socket activated files passed in") + } + if listeners[fdOffset] == nil { + return nil, fmt.Errorf("failed to listen on systemd activated file at fd %d", fdOffset+3) + } + for i, ls := range listeners { + if i == fdOffset || ls == nil { + continue + } + if err := ls.Close(); err != nil { + logrus.Errorf("Failed to close systemd activated file at fd %d: %v", fdOffset+3, err) + } + } + return []net.Listener{listeners[fdOffset]}, nil +} diff --git a/vendor/github.com/docker/docker/docker/runc.go b/vendor/github.com/docker/docker/docker/runc.go new file mode 100644 index 00000000..dccbccbb --- /dev/null +++ b/vendor/github.com/docker/docker/docker/runc.go @@ -0,0 +1,12 @@ +// +build !exclude_runc + +package docker + +import ( + "github.com/docker/docker/pkg/reexec" + "github.com/opencontainers/runc" +) + +func init() { + reexec.Register("docker-runc", runc.Main) +} diff --git a/vendor/github.com/docker/docker/dockerversion/useragent.go b/vendor/github.com/docker/docker/dockerversion/useragent.go new file mode 100644 index 00000000..d2a891c4 --- /dev/null +++ b/vendor/github.com/docker/docker/dockerversion/useragent.go @@ -0,0 +1,74 @@ +package dockerversion + +import ( + "fmt" + "runtime" + + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/pkg/parsers/kernel" + "github.com/docker/docker/pkg/useragent" + "golang.org/x/net/context" +) + +// DockerUserAgent is the User-Agent the Docker client uses to identify itself. 
+// In accordance with RFC 7231 (5.5.3), it is of the form:
+// [docker client's UA] UpstreamClient([upstream client's UA])
+func DockerUserAgent(ctx context.Context) string {
+ httpVersion := make([]useragent.VersionInfo, 0, 6)
+ httpVersion = append(httpVersion, useragent.VersionInfo{Name: "docker", Version: Version})
+ httpVersion = append(httpVersion, useragent.VersionInfo{Name: "go", Version: runtime.Version()})
+ httpVersion = append(httpVersion, useragent.VersionInfo{Name: "git-commit", Version: GitCommit})
+ if kernelVersion, err := kernel.GetKernelVersion(); err == nil {
+ httpVersion = append(httpVersion, useragent.VersionInfo{Name: "kernel", Version: kernelVersion.String()})
+ }
+ httpVersion = append(httpVersion, useragent.VersionInfo{Name: "os", Version: runtime.GOOS})
+ httpVersion = append(httpVersion, useragent.VersionInfo{Name: "arch", Version: runtime.GOARCH})
+
+ dockerUA := useragent.AppendVersions("", httpVersion...)
+ upstreamUA := getUserAgentFromContext(ctx)
+ if len(upstreamUA) > 0 {
+ ret := insertUpstreamUserAgent(upstreamUA, dockerUA)
+ return ret
+ }
+ return dockerUA
+}
+
+// getUserAgentFromContext returns the previously saved user-agent context stored in ctx, if one exists
+func getUserAgentFromContext(ctx context.Context) string {
+ var upstreamUA string
+ if ctx != nil {
+ var ki interface{} = ctx.Value(httputils.UAStringKey)
+ if ki != nil {
+ upstreamUA = ctx.Value(httputils.UAStringKey).(string)
+ }
+ }
+ return upstreamUA
+}
+
+// escapeStr returns s with every rune in charsToEscape escaped by a backslash
+func escapeStr(s string, charsToEscape string) string {
+ var ret string
+ for _, currRune := range s {
+ appended := false
+ for _, escapeableRune := range charsToEscape {
+ if currRune == escapeableRune {
+ ret += `\` + string(currRune)
+ appended = true
+ break
+ }
+ }
+ if !appended {
+ ret += string(currRune)
+ }
+ }
+ return ret
+}
+
+// insertUpstreamUserAgent adds the upstream client useragent to create a user-agent
+// string of the form:
+// $dockerUA UpstreamClient($upstreamUA)
+func insertUpstreamUserAgent(upstreamUA string, dockerUA string) string {
+ charsToEscape := `();\`
+ upstreamUAEscaped := escapeStr(upstreamUA, charsToEscape)
+ return fmt.Sprintf("%s UpstreamClient(%s)", dockerUA, upstreamUAEscaped)
+}
diff --git a/vendor/github.com/docker/docker/dockerversion/version_lib.go b/vendor/github.com/docker/docker/dockerversion/version_lib.go
new file mode 100644
index 00000000..6644bce2
--- /dev/null
+++ b/vendor/github.com/docker/docker/dockerversion/version_lib.go
@@ -0,0 +1,13 @@
+// +build !autogen
+
+// Package dockerversion is auto-generated at build-time
+package dockerversion
+
+// Default build-time variable for library-import.
+// This file is overridden on build with build-time information.
+const (
+ GitCommit string = "library-import"
+ Version string = "library-import"
+ BuildTime string = "library-import"
+ IAmStatic string = "library-import"
+)
diff --git a/vendor/github.com/docker/docker/errors/errors.go b/vendor/github.com/docker/docker/errors/errors.go
new file mode 100644
index 00000000..8070f48f
--- /dev/null
+++ b/vendor/github.com/docker/docker/errors/errors.go
@@ -0,0 +1,41 @@
+package errors
+
+import "net/http"
+
+// apiError is an error wrapper that also
+// holds information about response status codes.
+type apiError struct {
+ error
+ statusCode int
+}
+
+// HTTPErrorStatusCode returns a status code.
+func (e apiError) HTTPErrorStatusCode() int {
+	return e.statusCode
+}
+
+// NewErrorWithStatusCode allows you to associate
+// a specific HTTP Status Code to an error.
+// The Server will take that code and set
+// it as the response status.
+func NewErrorWithStatusCode(err error, code int) error {
+	return apiError{err, code}
+}
+
+// NewBadRequestError creates a new API error
+// that has the 400 HTTP status code associated to it.
+func NewBadRequestError(err error) error {
+	return NewErrorWithStatusCode(err, http.StatusBadRequest)
+}
+
+// NewRequestNotFoundError creates a new API error
+// that has the 404 HTTP status code associated to it.
+func NewRequestNotFoundError(err error) error {
+	return NewErrorWithStatusCode(err, http.StatusNotFound)
+}
+
+// NewRequestConflictError creates a new API error
+// that has the 409 HTTP status code associated to it.
+func NewRequestConflictError(err error) error {
+	return NewErrorWithStatusCode(err, http.StatusConflict)
+}
diff --git a/vendor/github.com/docker/docker/image/tarexport/load.go b/vendor/github.com/docker/docker/image/tarexport/load.go
new file mode 100644
index 00000000..42eaa40b
--- /dev/null
+++ b/vendor/github.com/docker/docker/image/tarexport/load.go
@@ -0,0 +1,372 @@
+package tarexport
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"reflect"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/image"
+	"github.com/docker/docker/image/v1"
+	"github.com/docker/docker/layer"
+	"github.com/docker/docker/pkg/archive"
+	"github.com/docker/docker/pkg/chrootarchive"
+	"github.com/docker/docker/pkg/progress"
+	"github.com/docker/docker/pkg/streamformatter"
+	"github.com/docker/docker/pkg/stringid"
+	"github.com/docker/docker/pkg/symlink"
+	"github.com/docker/docker/reference"
+)
+
+func (l *tarexporter) Load(inTar io.ReadCloser, outStream io.Writer, quiet bool) error {
+	var (
+		sf             = streamformatter.NewJSONStreamFormatter()
+		progressOutput progress.Output
+	)
+	if !quiet {
+		progressOutput = sf.NewProgressOutput(outStream, false)
+		outStream = &streamformatter.StdoutFormatter{Writer: outStream, StreamFormatter: streamformatter.NewJSONStreamFormatter()}
+	}
+
+	tmpDir, err := ioutil.TempDir("", "docker-import-")
+	if err != nil {
+		return err
+	}
+	defer os.RemoveAll(tmpDir)
+
+	if err := chrootarchive.Untar(inTar, tmpDir, nil); err != nil {
+		return err
+	}
+	// read manifest, if no file then load in legacy mode
+	manifestPath, err := safePath(tmpDir, manifestFileName)
+	if err != nil {
+		return err
+	}
+	manifestFile, err := os.Open(manifestPath)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return l.legacyLoad(tmpDir, outStream, progressOutput)
+		}
+		return err
+	}
+	defer manifestFile.Close()
+
+	var manifest []manifestItem
+	if err := json.NewDecoder(manifestFile).Decode(&manifest); err != nil {
+		return err
+	}
+
+	var parentLinks []parentLink
+
+	for _, m := range manifest {
+		configPath, err := safePath(tmpDir, m.Config)
+		if err != nil {
+			return err
+		}
+		config, err := ioutil.ReadFile(configPath)
+		if err != nil {
+			return err
+		}
+		img, err := image.NewFromJSON(config)
+		if err != nil {
+			return err
+		}
+		var rootFS image.RootFS
+		rootFS = *img.RootFS
+		rootFS.DiffIDs = nil
+
+		if expected, actual := len(m.Layers), len(img.RootFS.DiffIDs); expected != actual {
+			return fmt.Errorf("invalid manifest, layers length mismatch: expected %d, got %d", expected, actual)
+		}
+
+		for i, diffID := range img.RootFS.DiffIDs {
+			layerPath, err := safePath(tmpDir,
m.Layers[i]) + if err != nil { + return err + } + r := rootFS + r.Append(diffID) + newLayer, err := l.ls.Get(r.ChainID()) + if err != nil { + newLayer, err = l.loadLayer(layerPath, rootFS, diffID.String(), progressOutput) + if err != nil { + return err + } + } + defer layer.ReleaseAndLog(l.ls, newLayer) + if expected, actual := diffID, newLayer.DiffID(); expected != actual { + return fmt.Errorf("invalid diffID for layer %d: expected %q, got %q", i, expected, actual) + } + rootFS.Append(diffID) + } + + imgID, err := l.is.Create(config) + if err != nil { + return err + } + + for _, repoTag := range m.RepoTags { + named, err := reference.ParseNamed(repoTag) + if err != nil { + return err + } + ref, ok := named.(reference.NamedTagged) + if !ok { + return fmt.Errorf("invalid tag %q", repoTag) + } + l.setLoadedTag(ref, imgID, outStream) + } + + parentLinks = append(parentLinks, parentLink{imgID, m.Parent}) + } + + for _, p := range validatedParentLinks(parentLinks) { + if p.parentID != "" { + if err := l.setParentID(p.id, p.parentID); err != nil { + return err + } + } + } + + return nil +} + +func (l *tarexporter) setParentID(id, parentID image.ID) error { + img, err := l.is.Get(id) + if err != nil { + return err + } + parent, err := l.is.Get(parentID) + if err != nil { + return err + } + if !checkValidParent(img, parent) { + return fmt.Errorf("image %v is not a valid parent for %v", parent.ID, img.ID) + } + return l.is.SetParent(id, parentID) +} + +func (l *tarexporter) loadLayer(filename string, rootFS image.RootFS, id string, progressOutput progress.Output) (layer.Layer, error) { + rawTar, err := os.Open(filename) + if err != nil { + logrus.Debugf("Error reading embedded tar: %v", err) + return nil, err + } + defer rawTar.Close() + + inflatedLayerData, err := archive.DecompressStream(rawTar) + if err != nil { + return nil, err + } + defer inflatedLayerData.Close() + + if progressOutput != nil { + fileInfo, err := os.Stat(filename) + if err != nil { + logrus.Debugf("Error statting file: %v", err) + return nil, err + } + + progressReader := progress.NewProgressReader(inflatedLayerData, progressOutput, fileInfo.Size(), stringid.TruncateID(id), "Loading layer") + + return l.ls.Register(progressReader, rootFS.ChainID()) + } + return l.ls.Register(inflatedLayerData, rootFS.ChainID()) +} + +func (l *tarexporter) setLoadedTag(ref reference.NamedTagged, imgID image.ID, outStream io.Writer) error { + if prevID, err := l.rs.Get(ref); err == nil && prevID != imgID { + fmt.Fprintf(outStream, "The image %s already exists, renaming the old one with ID %s to empty string\n", ref.String(), string(prevID)) // todo: this message is wrong in case of multiple tags + } + + if err := l.rs.AddTag(ref, imgID, true); err != nil { + return err + } + return nil +} + +func (l *tarexporter) legacyLoad(tmpDir string, outStream io.Writer, progressOutput progress.Output) error { + legacyLoadedMap := make(map[string]image.ID) + + dirs, err := ioutil.ReadDir(tmpDir) + if err != nil { + return err + } + + // every dir represents an image + for _, d := range dirs { + if d.IsDir() { + if err := l.legacyLoadImage(d.Name(), tmpDir, legacyLoadedMap, progressOutput); err != nil { + return err + } + } + } + + // load tags from repositories file + repositoriesPath, err := safePath(tmpDir, legacyRepositoriesFileName) + if err != nil { + return err + } + repositoriesFile, err := os.Open(repositoriesPath) + if err != nil { + if !os.IsNotExist(err) { + return err + } + return repositoriesFile.Close() + } + defer repositoriesFile.Close() + 
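// Illustrative example (not part of the vendored code): the legacy
// "repositories" file decoded below maps repository name -> tag -> image ID,
// for instance (the ID is shortened and purely hypothetical):
//
//	{
//	  "busybox": {
//	    "latest": "4986bf8c1536..."
//	  }
//	}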
+ repositories := make(map[string]map[string]string) + if err := json.NewDecoder(repositoriesFile).Decode(&repositories); err != nil { + return err + } + + for name, tagMap := range repositories { + for tag, oldID := range tagMap { + imgID, ok := legacyLoadedMap[oldID] + if !ok { + return fmt.Errorf("invalid target ID: %v", oldID) + } + named, err := reference.WithName(name) + if err != nil { + return err + } + ref, err := reference.WithTag(named, tag) + if err != nil { + return err + } + l.setLoadedTag(ref, imgID, outStream) + } + } + + return nil +} + +func (l *tarexporter) legacyLoadImage(oldID, sourceDir string, loadedMap map[string]image.ID, progressOutput progress.Output) error { + if _, loaded := loadedMap[oldID]; loaded { + return nil + } + configPath, err := safePath(sourceDir, filepath.Join(oldID, legacyConfigFileName)) + if err != nil { + return err + } + imageJSON, err := ioutil.ReadFile(configPath) + if err != nil { + logrus.Debugf("Error reading json: %v", err) + return err + } + + var img struct{ Parent string } + if err := json.Unmarshal(imageJSON, &img); err != nil { + return err + } + + var parentID image.ID + if img.Parent != "" { + for { + var loaded bool + if parentID, loaded = loadedMap[img.Parent]; !loaded { + if err := l.legacyLoadImage(img.Parent, sourceDir, loadedMap, progressOutput); err != nil { + return err + } + } else { + break + } + } + } + + // todo: try to connect with migrate code + rootFS := image.NewRootFS() + var history []image.History + + if parentID != "" { + parentImg, err := l.is.Get(parentID) + if err != nil { + return err + } + + rootFS = parentImg.RootFS + history = parentImg.History + } + + layerPath, err := safePath(sourceDir, filepath.Join(oldID, legacyLayerFileName)) + if err != nil { + return err + } + newLayer, err := l.loadLayer(layerPath, *rootFS, oldID, progressOutput) + if err != nil { + return err + } + rootFS.Append(newLayer.DiffID()) + + h, err := v1.HistoryFromConfig(imageJSON, false) + if err != nil { + return err + } + history = append(history, h) + + config, err := v1.MakeConfigFromV1Config(imageJSON, rootFS, history) + if err != nil { + return err + } + imgID, err := l.is.Create(config) + if err != nil { + return err + } + + metadata, err := l.ls.Release(newLayer) + layer.LogReleaseMetadata(metadata) + if err != nil { + return err + } + + if parentID != "" { + if err := l.is.SetParent(imgID, parentID); err != nil { + return err + } + } + + loadedMap[oldID] = imgID + return nil +} + +func safePath(base, path string) (string, error) { + return symlink.FollowSymlinkInScope(filepath.Join(base, path), base) +} + +type parentLink struct { + id, parentID image.ID +} + +func validatedParentLinks(pl []parentLink) (ret []parentLink) { +mainloop: + for i, p := range pl { + ret = append(ret, p) + for _, p2 := range pl { + if p2.id == p.parentID && p2.id != p.id { + continue mainloop + } + } + ret[i].parentID = "" + } + return +} + +func checkValidParent(img, parent *image.Image) bool { + if len(img.History) == 0 && len(parent.History) == 0 { + return true // having history is not mandatory + } + if len(img.History)-len(parent.History) != 1 { + return false + } + for i, h := range parent.History { + if !reflect.DeepEqual(h, img.History[i]) { + return false + } + } + return true +} diff --git a/vendor/github.com/docker/docker/image/tarexport/save.go b/vendor/github.com/docker/docker/image/tarexport/save.go new file mode 100644 index 00000000..9ec3cc9f --- /dev/null +++ b/vendor/github.com/docker/docker/image/tarexport/save.go @@ -0,0 +1,319 
@@ +package tarexport + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "time" + + "github.com/docker/distribution/digest" + "github.com/docker/docker/image" + "github.com/docker/docker/image/v1" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/reference" +) + +type imageDescriptor struct { + refs []reference.NamedTagged + layers []string +} + +type saveSession struct { + *tarexporter + outDir string + images map[image.ID]*imageDescriptor + savedLayers map[string]struct{} +} + +func (l *tarexporter) Save(names []string, outStream io.Writer) error { + images, err := l.parseNames(names) + if err != nil { + return err + } + + return (&saveSession{tarexporter: l, images: images}).save(outStream) +} + +func (l *tarexporter) parseNames(names []string) (map[image.ID]*imageDescriptor, error) { + imgDescr := make(map[image.ID]*imageDescriptor) + + addAssoc := func(id image.ID, ref reference.Named) { + if _, ok := imgDescr[id]; !ok { + imgDescr[id] = &imageDescriptor{} + } + + if ref != nil { + var tagged reference.NamedTagged + if _, ok := ref.(reference.Canonical); ok { + return + } + var ok bool + if tagged, ok = ref.(reference.NamedTagged); !ok { + var err error + if tagged, err = reference.WithTag(ref, reference.DefaultTag); err != nil { + return + } + } + + for _, t := range imgDescr[id].refs { + if tagged.String() == t.String() { + return + } + } + imgDescr[id].refs = append(imgDescr[id].refs, tagged) + } + } + + for _, name := range names { + id, ref, err := reference.ParseIDOrReference(name) + if err != nil { + return nil, err + } + if id != "" { + _, err := l.is.Get(image.ID(id)) + if err != nil { + return nil, err + } + addAssoc(image.ID(id), nil) + continue + } + if ref.Name() == string(digest.Canonical) { + imgID, err := l.is.Search(name) + if err != nil { + return nil, err + } + addAssoc(imgID, nil) + continue + } + if reference.IsNameOnly(ref) { + assocs := l.rs.ReferencesByName(ref) + for _, assoc := range assocs { + addAssoc(assoc.ImageID, assoc.Ref) + } + if len(assocs) == 0 { + imgID, err := l.is.Search(name) + if err != nil { + return nil, err + } + addAssoc(imgID, nil) + } + continue + } + var imgID image.ID + if imgID, err = l.rs.Get(ref); err != nil { + return nil, err + } + addAssoc(imgID, ref) + + } + return imgDescr, nil +} + +func (s *saveSession) save(outStream io.Writer) error { + s.savedLayers = make(map[string]struct{}) + + // get image json + tempDir, err := ioutil.TempDir("", "docker-export-") + if err != nil { + return err + } + defer os.RemoveAll(tempDir) + + s.outDir = tempDir + reposLegacy := make(map[string]map[string]string) + + var manifest []manifestItem + var parentLinks []parentLink + + for id, imageDescr := range s.images { + if err = s.saveImage(id); err != nil { + return err + } + + var repoTags []string + var layers []string + + for _, ref := range imageDescr.refs { + if _, ok := reposLegacy[ref.Name()]; !ok { + reposLegacy[ref.Name()] = make(map[string]string) + } + reposLegacy[ref.Name()][ref.Tag()] = imageDescr.layers[len(imageDescr.layers)-1] + repoTags = append(repoTags, ref.String()) + } + + for _, l := range imageDescr.layers { + layers = append(layers, filepath.Join(l, legacyLayerFileName)) + } + + manifest = append(manifest, manifestItem{ + Config: digest.Digest(id).Hex() + ".json", + RepoTags: repoTags, + Layers: layers, + }) + + parentID, _ := s.is.GetParent(id) + parentLinks = append(parentLinks, parentLink{id, 
parentID}) + } + + for i, p := range validatedParentLinks(parentLinks) { + if p.parentID != "" { + manifest[i].Parent = p.parentID + } + } + + if len(reposLegacy) > 0 { + reposFile := filepath.Join(tempDir, legacyRepositoriesFileName) + f, err := os.OpenFile(reposFile, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644) + if err != nil { + f.Close() + return err + } + if err := json.NewEncoder(f).Encode(reposLegacy); err != nil { + return err + } + if err := f.Close(); err != nil { + return err + } + if err := system.Chtimes(reposFile, time.Unix(0, 0), time.Unix(0, 0)); err != nil { + return err + } + } + + manifestFileName := filepath.Join(tempDir, manifestFileName) + f, err := os.OpenFile(manifestFileName, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644) + if err != nil { + f.Close() + return err + } + if err := json.NewEncoder(f).Encode(manifest); err != nil { + return err + } + if err := f.Close(); err != nil { + return err + } + if err := system.Chtimes(manifestFileName, time.Unix(0, 0), time.Unix(0, 0)); err != nil { + return err + } + + fs, err := archive.Tar(tempDir, archive.Uncompressed) + if err != nil { + return err + } + defer fs.Close() + + if _, err := io.Copy(outStream, fs); err != nil { + return err + } + return nil +} + +func (s *saveSession) saveImage(id image.ID) error { + img, err := s.is.Get(id) + if err != nil { + return err + } + + if len(img.RootFS.DiffIDs) == 0 { + return fmt.Errorf("empty export - not implemented") + } + + var parent digest.Digest + var layers []string + for i := range img.RootFS.DiffIDs { + v1Img := image.V1Image{} + if i == len(img.RootFS.DiffIDs)-1 { + v1Img = img.V1Image + } + rootFS := *img.RootFS + rootFS.DiffIDs = rootFS.DiffIDs[:i+1] + v1ID, err := v1.CreateID(v1Img, rootFS.ChainID(), parent) + if err != nil { + return err + } + + v1Img.ID = v1ID.Hex() + if parent != "" { + v1Img.Parent = parent.Hex() + } + + if err := s.saveLayer(rootFS.ChainID(), v1Img, img.Created); err != nil { + return err + } + layers = append(layers, v1Img.ID) + parent = v1ID + } + + configFile := filepath.Join(s.outDir, digest.Digest(id).Hex()+".json") + if err := ioutil.WriteFile(configFile, img.RawJSON(), 0644); err != nil { + return err + } + if err := system.Chtimes(configFile, img.Created, img.Created); err != nil { + return err + } + + s.images[id].layers = layers + return nil +} + +func (s *saveSession) saveLayer(id layer.ChainID, legacyImg image.V1Image, createdTime time.Time) error { + if _, exists := s.savedLayers[legacyImg.ID]; exists { + return nil + } + + outDir := filepath.Join(s.outDir, legacyImg.ID) + if err := os.Mkdir(outDir, 0755); err != nil { + return err + } + + // todo: why is this version file here? 
+	if err := ioutil.WriteFile(filepath.Join(outDir, legacyVersionFileName), []byte("1.0"), 0644); err != nil {
+		return err
+	}
+
+	imageConfig, err := json.Marshal(legacyImg)
+	if err != nil {
+		return err
+	}
+
+	if err := ioutil.WriteFile(filepath.Join(outDir, legacyConfigFileName), imageConfig, 0644); err != nil {
+		return err
+	}
+
+	// serialize filesystem
+	tarFile, err := os.Create(filepath.Join(outDir, legacyLayerFileName))
+	if err != nil {
+		return err
+	}
+	defer tarFile.Close()
+
+	l, err := s.ls.Get(id)
+	if err != nil {
+		return err
+	}
+	defer layer.ReleaseAndLog(s.ls, l)
+
+	arch, err := l.TarStream()
+	if err != nil {
+		return err
+	}
+	defer arch.Close()
+
+	if _, err := io.Copy(tarFile, arch); err != nil {
+		return err
+	}
+
+	for _, fname := range []string{"", legacyVersionFileName, legacyConfigFileName, legacyLayerFileName} {
+		// todo: maybe save layer created timestamp?
+		if err := system.Chtimes(filepath.Join(outDir, fname), createdTime, createdTime); err != nil {
+			return err
+		}
+	}
+
+	s.savedLayers[legacyImg.ID] = struct{}{}
+	return nil
+}
diff --git a/vendor/github.com/docker/docker/image/tarexport/tarexport.go b/vendor/github.com/docker/docker/image/tarexport/tarexport.go
new file mode 100644
index 00000000..5e208777
--- /dev/null
+++ b/vendor/github.com/docker/docker/image/tarexport/tarexport.go
@@ -0,0 +1,37 @@
+package tarexport
+
+import (
+	"github.com/docker/docker/image"
+	"github.com/docker/docker/layer"
+	"github.com/docker/docker/reference"
+)
+
+const (
+	manifestFileName           = "manifest.json"
+	legacyLayerFileName        = "layer.tar"
+	legacyConfigFileName       = "json"
+	legacyVersionFileName      = "VERSION"
+	legacyRepositoriesFileName = "repositories"
+)
+
+type manifestItem struct {
+	Config   string
+	RepoTags []string
+	Layers   []string
+	Parent   image.ID `json:",omitempty"`
+}
+
+type tarexporter struct {
+	is image.Store
+	ls layer.Store
+	rs reference.Store
+}
+
+// NewTarExporter returns a new ImageExporter for tar packages
+func NewTarExporter(is image.Store, ls layer.Store, rs reference.Store) image.Exporter {
+	return &tarexporter{
+		is: is,
+		ls: ls,
+		rs: rs,
+	}
+}
diff --git a/vendor/github.com/docker/docker/libcontainerd/client.go b/vendor/github.com/docker/docker/libcontainerd/client.go
new file mode 100644
index 00000000..7e8e47bc
--- /dev/null
+++ b/vendor/github.com/docker/docker/libcontainerd/client.go
@@ -0,0 +1,46 @@
+package libcontainerd
+
+import (
+	"fmt"
+	"sync"
+
+	"github.com/docker/docker/pkg/locker"
+)
+
+// clientCommon contains the platform agnostic fields used in the client structure
+type clientCommon struct {
+	backend    Backend
+	containers map[string]*container
+	locker     *locker.Locker
+	mapMutex   sync.RWMutex // protects read/write operations from the containers map
+}
+
+func (clnt *client) lock(containerID string) {
+	clnt.locker.Lock(containerID)
+}
+
+func (clnt *client) unlock(containerID string) {
+	clnt.locker.Unlock(containerID)
+}
+
+// must hold a lock for cont.containerID
+func (clnt *client) appendContainer(cont *container) {
+	clnt.mapMutex.Lock()
+	clnt.containers[cont.containerID] = cont
+	clnt.mapMutex.Unlock()
+}
+
+func (clnt *client) deleteContainer(friendlyName string) {
+	clnt.mapMutex.Lock()
+	delete(clnt.containers, friendlyName)
+	clnt.mapMutex.Unlock()
+}
+
+func (clnt *client) getContainer(containerID string) (*container, error) {
+	clnt.mapMutex.RLock()
+	container, ok := clnt.containers[containerID]
+	defer clnt.mapMutex.RUnlock()
+	if !ok {
+		return nil, fmt.Errorf("invalid container: %s", containerID) // fixme:
typed error + } + return container, nil +} diff --git a/vendor/github.com/docker/docker/libcontainerd/client_linux.go b/vendor/github.com/docker/docker/libcontainerd/client_linux.go new file mode 100644 index 00000000..8eab7512 --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/client_linux.go @@ -0,0 +1,401 @@ +package libcontainerd + +import ( + "encoding/json" + "fmt" + "os" + "path/filepath" + "strings" + "sync" + "syscall" + + "github.com/Sirupsen/logrus" + containerd "github.com/docker/containerd/api/grpc/types" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/mount" + "github.com/opencontainers/specs/specs-go" + "golang.org/x/net/context" +) + +type client struct { + clientCommon + + // Platform specific properties below here. + remote *remote + q queue + exitNotifiers map[string]*exitNotifier +} + +func (clnt *client) AddProcess(containerID, processFriendlyName string, specp Process) error { + clnt.lock(containerID) + defer clnt.unlock(containerID) + container, err := clnt.getContainer(containerID) + if err != nil { + return err + } + + spec, err := container.spec() + if err != nil { + return err + } + sp := spec.Process + sp.Args = specp.Args + sp.Terminal = specp.Terminal + if specp.Env != nil { + sp.Env = specp.Env + } + if specp.Cwd != nil { + sp.Cwd = *specp.Cwd + } + if specp.User != nil { + sp.User = specs.User{ + UID: specp.User.UID, + GID: specp.User.GID, + AdditionalGids: specp.User.AdditionalGids, + } + } + if specp.Capabilities != nil { + sp.Capabilities = specp.Capabilities + } + + p := container.newProcess(processFriendlyName) + + r := &containerd.AddProcessRequest{ + Args: sp.Args, + Cwd: sp.Cwd, + Terminal: sp.Terminal, + Id: containerID, + Env: sp.Env, + User: &containerd.User{ + Uid: sp.User.UID, + Gid: sp.User.GID, + AdditionalGids: sp.User.AdditionalGids, + }, + Pid: processFriendlyName, + Stdin: p.fifo(syscall.Stdin), + Stdout: p.fifo(syscall.Stdout), + Stderr: p.fifo(syscall.Stderr), + Capabilities: sp.Capabilities, + ApparmorProfile: sp.ApparmorProfile, + SelinuxLabel: sp.SelinuxLabel, + NoNewPrivileges: sp.NoNewPrivileges, + Rlimits: convertRlimits(sp.Rlimits), + } + + iopipe, err := p.openFifos(sp.Terminal) + if err != nil { + return err + } + + if _, err := clnt.remote.apiClient.AddProcess(context.Background(), r); err != nil { + p.closeFifos(iopipe) + return err + } + + container.processes[processFriendlyName] = p + + clnt.unlock(containerID) + + if err := clnt.backend.AttachStreams(processFriendlyName, *iopipe); err != nil { + return err + } + clnt.lock(containerID) + + return nil +} + +func (clnt *client) prepareBundleDir(uid, gid int) (string, error) { + root, err := filepath.Abs(clnt.remote.stateDir) + if err != nil { + return "", err + } + if uid == 0 && gid == 0 { + return root, nil + } + p := string(filepath.Separator) + for _, d := range strings.Split(root, string(filepath.Separator))[1:] { + p = filepath.Join(p, d) + fi, err := os.Stat(p) + if err != nil && !os.IsNotExist(err) { + return "", err + } + if os.IsNotExist(err) || fi.Mode()&1 == 0 { + p = fmt.Sprintf("%s.%d.%d", p, uid, gid) + if err := idtools.MkdirAs(p, 0700, uid, gid); err != nil && !os.IsExist(err) { + return "", err + } + } + } + return p, nil +} + +func (clnt *client) Create(containerID string, spec Spec, options ...CreateOption) (err error) { + clnt.lock(containerID) + defer clnt.unlock(containerID) + + if ctr, err := clnt.getContainer(containerID); err == nil { + if ctr.restarting { + ctr.restartManager.Cancel() + ctr.clean() + } else { + 
return fmt.Errorf("Container %s is aleady active", containerID) + } + } + + uid, gid, err := getRootIDs(specs.Spec(spec)) + if err != nil { + return err + } + dir, err := clnt.prepareBundleDir(uid, gid) + if err != nil { + return err + } + + container := clnt.newContainer(filepath.Join(dir, containerID), options...) + if err := container.clean(); err != nil { + return err + } + + defer func() { + if err != nil { + container.clean() + clnt.deleteContainer(containerID) + } + }() + + if err := idtools.MkdirAllAs(container.dir, 0700, uid, gid); err != nil && !os.IsExist(err) { + return err + } + + f, err := os.Create(filepath.Join(container.dir, configFilename)) + if err != nil { + return err + } + defer f.Close() + if err := json.NewEncoder(f).Encode(spec); err != nil { + return err + } + + return container.start() +} + +func (clnt *client) Signal(containerID string, sig int) error { + clnt.lock(containerID) + defer clnt.unlock(containerID) + _, err := clnt.remote.apiClient.Signal(context.Background(), &containerd.SignalRequest{ + Id: containerID, + Pid: InitFriendlyName, + Signal: uint32(sig), + }) + return err +} + +func (clnt *client) Resize(containerID, processFriendlyName string, width, height int) error { + clnt.lock(containerID) + defer clnt.unlock(containerID) + if _, err := clnt.getContainer(containerID); err != nil { + return err + } + _, err := clnt.remote.apiClient.UpdateProcess(context.Background(), &containerd.UpdateProcessRequest{ + Id: containerID, + Pid: processFriendlyName, + Width: uint32(width), + Height: uint32(height), + }) + return err +} + +func (clnt *client) Pause(containerID string) error { + return clnt.setState(containerID, StatePause) +} + +func (clnt *client) setState(containerID, state string) error { + clnt.lock(containerID) + container, err := clnt.getContainer(containerID) + if err != nil { + clnt.unlock(containerID) + return err + } + if container.systemPid == 0 { + clnt.unlock(containerID) + return fmt.Errorf("No active process for container %s", containerID) + } + st := "running" + if state == StatePause { + st = "paused" + } + chstate := make(chan struct{}) + _, err = clnt.remote.apiClient.UpdateContainer(context.Background(), &containerd.UpdateContainerRequest{ + Id: containerID, + Pid: InitFriendlyName, + Status: st, + }) + if err != nil { + clnt.unlock(containerID) + return err + } + container.pauseMonitor.append(state, chstate) + clnt.unlock(containerID) + <-chstate + return nil +} + +func (clnt *client) Resume(containerID string) error { + return clnt.setState(containerID, StateResume) +} + +func (clnt *client) Stats(containerID string) (*Stats, error) { + resp, err := clnt.remote.apiClient.Stats(context.Background(), &containerd.StatsRequest{containerID}) + if err != nil { + return nil, err + } + return (*Stats)(resp), nil +} + +// Take care of the old 1.11.0 behavior in case the version upgrade +// happenned without a clean daemon shutdown +func (clnt *client) cleanupOldRootfs(containerID string) { + // Unmount and delete the bundle folder + if mts, err := mount.GetMounts(); err == nil { + for _, mts := range mts { + if strings.HasSuffix(mts.Mountpoint, containerID+"/rootfs") { + if err := syscall.Unmount(mts.Mountpoint, syscall.MNT_DETACH); err == nil { + os.RemoveAll(strings.TrimSuffix(mts.Mountpoint, "/rootfs")) + } + break + } + } + } +} + +func (clnt *client) setExited(containerID string) error { + clnt.lock(containerID) + defer clnt.unlock(containerID) + + var exitCode uint32 + if event, ok := clnt.remote.pastEvents[containerID]; ok { + 
exitCode = event.Status + delete(clnt.remote.pastEvents, containerID) + } + + err := clnt.backend.StateChanged(containerID, StateInfo{ + State: StateExit, + ExitCode: exitCode, + }) + + clnt.cleanupOldRootfs(containerID) + + return err +} + +func (clnt *client) GetPidsForContainer(containerID string) ([]int, error) { + cont, err := clnt.getContainerdContainer(containerID) + if err != nil { + return nil, err + } + pids := make([]int, len(cont.Pids)) + for i, p := range cont.Pids { + pids[i] = int(p) + } + return pids, nil +} + +// Summary returns a summary of the processes running in a container. +// This is a no-op on Linux. +func (clnt *client) Summary(containerID string) ([]Summary, error) { + return nil, nil +} + +func (clnt *client) getContainerdContainer(containerID string) (*containerd.Container, error) { + resp, err := clnt.remote.apiClient.State(context.Background(), &containerd.StateRequest{Id: containerID}) + if err != nil { + return nil, err + } + for _, cont := range resp.Containers { + if cont.Id == containerID { + return cont, nil + } + } + return nil, fmt.Errorf("invalid state response") +} + +func (clnt *client) newContainer(dir string, options ...CreateOption) *container { + container := &container{ + containerCommon: containerCommon{ + process: process{ + dir: dir, + processCommon: processCommon{ + containerID: filepath.Base(dir), + client: clnt, + friendlyName: InitFriendlyName, + }, + }, + processes: make(map[string]*process), + }, + } + for _, option := range options { + if err := option.Apply(container); err != nil { + logrus.Error(err) + } + } + return container +} + +func (clnt *client) UpdateResources(containerID string, resources Resources) error { + clnt.lock(containerID) + defer clnt.unlock(containerID) + container, err := clnt.getContainer(containerID) + if err != nil { + return err + } + if container.systemPid == 0 { + return fmt.Errorf("No active process for container %s", containerID) + } + _, err = clnt.remote.apiClient.UpdateContainer(context.Background(), &containerd.UpdateContainerRequest{ + Id: containerID, + Pid: InitFriendlyName, + Resources: (*containerd.UpdateResource)(&resources), + }) + if err != nil { + return err + } + return nil +} + +func (clnt *client) getExitNotifier(containerID string) *exitNotifier { + clnt.mapMutex.RLock() + defer clnt.mapMutex.RUnlock() + return clnt.exitNotifiers[containerID] +} + +func (clnt *client) getOrCreateExitNotifier(containerID string) *exitNotifier { + clnt.mapMutex.Lock() + w, ok := clnt.exitNotifiers[containerID] + defer clnt.mapMutex.Unlock() + if !ok { + w = &exitNotifier{c: make(chan struct{}), client: clnt} + clnt.exitNotifiers[containerID] = w + } + return w +} + +type exitNotifier struct { + id string + client *client + c chan struct{} + once sync.Once +} + +func (en *exitNotifier) close() { + en.once.Do(func() { + close(en.c) + en.client.mapMutex.Lock() + if en == en.client.exitNotifiers[en.id] { + delete(en.client.exitNotifiers, en.id) + } + en.client.mapMutex.Unlock() + }) +} +func (en *exitNotifier) wait() <-chan struct{} { + return en.c +} diff --git a/vendor/github.com/docker/docker/libcontainerd/client_liverestore_linux.go b/vendor/github.com/docker/docker/libcontainerd/client_liverestore_linux.go new file mode 100644 index 00000000..1a1f7fe7 --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/client_liverestore_linux.go @@ -0,0 +1,83 @@ +// +build experimental + +package libcontainerd + +import ( + "fmt" + + "github.com/Sirupsen/logrus" + containerd 
"github.com/docker/containerd/api/grpc/types" +) + +func (clnt *client) restore(cont *containerd.Container, options ...CreateOption) (err error) { + clnt.lock(cont.Id) + defer clnt.unlock(cont.Id) + + logrus.Debugf("restore container %s state %s", cont.Id, cont.Status) + + containerID := cont.Id + if _, err := clnt.getContainer(containerID); err == nil { + return fmt.Errorf("container %s is aleady active", containerID) + } + + defer func() { + if err != nil { + clnt.deleteContainer(cont.Id) + } + }() + + container := clnt.newContainer(cont.BundlePath, options...) + container.systemPid = systemPid(cont) + + var terminal bool + for _, p := range cont.Processes { + if p.Pid == InitFriendlyName { + terminal = p.Terminal + } + } + + iopipe, err := container.openFifos(terminal) + if err != nil { + return err + } + + if err := clnt.backend.AttachStreams(containerID, *iopipe); err != nil { + return err + } + + clnt.appendContainer(container) + + err = clnt.backend.StateChanged(containerID, StateInfo{ + State: StateRestore, + Pid: container.systemPid, + }) + + if err != nil { + return err + } + + if event, ok := clnt.remote.pastEvents[containerID]; ok { + // This should only be a pause or resume event + if event.Type == StatePause || event.Type == StateResume { + return clnt.backend.StateChanged(containerID, StateInfo{ + State: event.Type, + Pid: container.systemPid, + }) + } + + logrus.Warnf("unexpected backlog event: %#v", event) + } + + return nil +} + +func (clnt *client) Restore(containerID string, options ...CreateOption) error { + cont, err := clnt.getContainerdContainer(containerID) + if err == nil && cont.Status != "stopped" { + if err := clnt.restore(cont, options...); err != nil { + logrus.Errorf("error restoring %s: %v", containerID, err) + } + return nil + } + return clnt.setExited(containerID) +} diff --git a/vendor/github.com/docker/docker/libcontainerd/client_shutdownrestore_linux.go b/vendor/github.com/docker/docker/libcontainerd/client_shutdownrestore_linux.go new file mode 100644 index 00000000..1b4a2bc5 --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/client_shutdownrestore_linux.go @@ -0,0 +1,46 @@ +// +build !experimental + +package libcontainerd + +import ( + "syscall" + "time" + + "github.com/Sirupsen/logrus" +) + +func (clnt *client) Restore(containerID string, options ...CreateOption) error { + w := clnt.getOrCreateExitNotifier(containerID) + defer w.close() + cont, err := clnt.getContainerdContainer(containerID) + if err == nil && cont.Status != "stopped" { + clnt.lock(cont.Id) + container := clnt.newContainer(cont.BundlePath) + container.systemPid = systemPid(cont) + clnt.appendContainer(container) + clnt.unlock(cont.Id) + + container.discardFifos() + + if err := clnt.Signal(containerID, int(syscall.SIGTERM)); err != nil { + logrus.Errorf("error sending sigterm to %v: %v", containerID, err) + } + select { + case <-time.After(10 * time.Second): + if err := clnt.Signal(containerID, int(syscall.SIGKILL)); err != nil { + logrus.Errorf("error sending sigkill to %v: %v", containerID, err) + } + select { + case <-time.After(2 * time.Second): + case <-w.wait(): + return nil + } + case <-w.wait(): + return nil + } + } + + clnt.deleteContainer(containerID) + + return clnt.setExited(containerID) +} diff --git a/vendor/github.com/docker/docker/libcontainerd/container.go b/vendor/github.com/docker/docker/libcontainerd/container.go new file mode 100644 index 00000000..30bc9502 --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/container.go @@ -0,0 +1,40 
@@ +package libcontainerd + +import ( + "fmt" + "time" + + "github.com/docker/docker/restartmanager" +) + +const ( + // InitFriendlyName is the name given in the lookup map of processes + // for the first process started in a container. + InitFriendlyName = "init" + configFilename = "config.json" +) + +type containerCommon struct { + process + restartManager restartmanager.RestartManager + restarting bool + processes map[string]*process + startedAt time.Time +} + +// WithRestartManager sets the restartmanager to be used with the container. +func WithRestartManager(rm restartmanager.RestartManager) CreateOption { + return restartManager{rm} +} + +type restartManager struct { + rm restartmanager.RestartManager +} + +func (rm restartManager) Apply(p interface{}) error { + if pr, ok := p.(*container); ok { + pr.restartManager = rm.rm + return nil + } + return fmt.Errorf("WithRestartManager option not supported for this client") +} diff --git a/vendor/github.com/docker/docker/libcontainerd/container_linux.go b/vendor/github.com/docker/docker/libcontainerd/container_linux.go new file mode 100644 index 00000000..8a49cde2 --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/container_linux.go @@ -0,0 +1,209 @@ +package libcontainerd + +import ( + "encoding/json" + "io" + "io/ioutil" + "os" + "path/filepath" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + containerd "github.com/docker/containerd/api/grpc/types" + "github.com/docker/docker/restartmanager" + "github.com/opencontainers/specs/specs-go" + "golang.org/x/net/context" +) + +type container struct { + containerCommon + + // Platform specific fields are below here. + pauseMonitor + oom bool +} + +func (ctr *container) clean() error { + if os.Getenv("LIBCONTAINERD_NOCLEAN") == "1" { + return nil + } + if _, err := os.Lstat(ctr.dir); err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + + if err := os.RemoveAll(ctr.dir); err != nil { + return err + } + return nil +} + +// cleanProcess removes the fifos used by an additional process. +// Caller needs to lock container ID before calling this method. 
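// Illustrative sketch (not part of the vendored code): WithRestartManager
// above is a functional option; the daemon passes it to Client.Create, which
// applies each option to the new container before starting it (the rm value
// is assumed to come from the container's restart policy):
//
//	err := client.Create(containerID, spec, libcontainerd.WithRestartManager(rm))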
+func (ctr *container) cleanProcess(id string) { + if p, ok := ctr.processes[id]; ok { + for _, i := range []int{syscall.Stdin, syscall.Stdout, syscall.Stderr} { + if err := os.Remove(p.fifo(i)); err != nil { + logrus.Warnf("failed to remove %v for process %v: %v", p.fifo(i), id, err) + } + } + } + delete(ctr.processes, id) +} + +func (ctr *container) spec() (*specs.Spec, error) { + var spec specs.Spec + dt, err := ioutil.ReadFile(filepath.Join(ctr.dir, configFilename)) + if err != nil { + return nil, err + } + if err := json.Unmarshal(dt, &spec); err != nil { + return nil, err + } + return &spec, nil +} + +func (ctr *container) start() error { + spec, err := ctr.spec() + if err != nil { + return nil + } + iopipe, err := ctr.openFifos(spec.Process.Terminal) + if err != nil { + return err + } + + r := &containerd.CreateContainerRequest{ + Id: ctr.containerID, + BundlePath: ctr.dir, + Stdin: ctr.fifo(syscall.Stdin), + Stdout: ctr.fifo(syscall.Stdout), + Stderr: ctr.fifo(syscall.Stderr), + // check to see if we are running in ramdisk to disable pivot root + NoPivotRoot: os.Getenv("DOCKER_RAMDISK") != "", + } + ctr.client.appendContainer(ctr) + + resp, err := ctr.client.remote.apiClient.CreateContainer(context.Background(), r) + if err != nil { + ctr.closeFifos(iopipe) + return err + } + ctr.startedAt = time.Now() + + if err := ctr.client.backend.AttachStreams(ctr.containerID, *iopipe); err != nil { + return err + } + ctr.systemPid = systemPid(resp.Container) + + return ctr.client.backend.StateChanged(ctr.containerID, StateInfo{ + State: StateStart, + Pid: ctr.systemPid, + }) +} + +func (ctr *container) newProcess(friendlyName string) *process { + return &process{ + dir: ctr.dir, + processCommon: processCommon{ + containerID: ctr.containerID, + friendlyName: friendlyName, + client: ctr.client, + }, + } +} + +func (ctr *container) handleEvent(e *containerd.Event) error { + ctr.client.lock(ctr.containerID) + defer ctr.client.unlock(ctr.containerID) + switch e.Type { + case StateExit, StatePause, StateResume, StateOOM: + st := StateInfo{ + State: e.Type, + ExitCode: e.Status, + OOMKilled: e.Type == StateExit && ctr.oom, + } + if e.Type == StateOOM { + ctr.oom = true + } + if e.Type == StateExit && e.Pid != InitFriendlyName { + st.ProcessID = e.Pid + st.State = StateExitProcess + } + if st.State == StateExit && ctr.restartManager != nil { + restart, wait, err := ctr.restartManager.ShouldRestart(e.Status, false, time.Since(ctr.startedAt)) + if err != nil { + logrus.Warnf("container %s %v", ctr.containerID, err) + } else if restart { + st.State = StateRestart + ctr.restarting = true + ctr.client.deleteContainer(e.Id) + go func() { + err := <-wait + ctr.client.lock(ctr.containerID) + defer ctr.client.unlock(ctr.containerID) + ctr.restarting = false + if err != nil { + st.State = StateExit + ctr.clean() + ctr.client.q.append(e.Id, func() { + if err := ctr.client.backend.StateChanged(e.Id, st); err != nil { + logrus.Error(err) + } + }) + if err != restartmanager.ErrRestartCanceled { + logrus.Error(err) + } + } else { + ctr.start() + } + }() + } + } + + // Remove process from list if we have exited + // We need to do so here in case the Message Handler decides to restart it. 
+ switch st.State { + case StateExit: + ctr.clean() + ctr.client.deleteContainer(e.Id) + case StateExitProcess: + ctr.cleanProcess(st.ProcessID) + } + ctr.client.q.append(e.Id, func() { + if err := ctr.client.backend.StateChanged(e.Id, st); err != nil { + logrus.Error(err) + } + if e.Type == StatePause || e.Type == StateResume { + ctr.pauseMonitor.handle(e.Type) + } + if e.Type == StateExit { + if en := ctr.client.getExitNotifier(e.Id); en != nil { + en.close() + } + } + }) + + default: + logrus.Debugf("event unhandled: %+v", e) + } + return nil +} + +// discardFifos attempts to fully read the container fifos to unblock processes +// that may be blocked on the writer side. +func (ctr *container) discardFifos() { + for _, i := range []int{syscall.Stdout, syscall.Stderr} { + f := ctr.fifo(i) + c := make(chan struct{}) + go func() { + close(c) // this channel is used to not close the writer too early, before readonly open has been called. + io.Copy(ioutil.Discard, openReaderFromFifo(f)) + }() + <-c + closeReaderFifo(f) // avoid blocking permanently on open if there is no writer side + } +} diff --git a/vendor/github.com/docker/docker/libcontainerd/pausemonitor_linux.go b/vendor/github.com/docker/docker/libcontainerd/pausemonitor_linux.go new file mode 100644 index 00000000..379cbf1f --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/pausemonitor_linux.go @@ -0,0 +1,31 @@ +package libcontainerd + +// pauseMonitor is helper to get notifications from pause state changes. +type pauseMonitor struct { + waiters map[string][]chan struct{} +} + +func (m *pauseMonitor) handle(t string) { + if m.waiters == nil { + return + } + q, ok := m.waiters[t] + if !ok { + return + } + if len(q) > 0 { + close(q[0]) + m.waiters[t] = q[1:] + } +} + +func (m *pauseMonitor) append(t string, waiter chan struct{}) { + if m.waiters == nil { + m.waiters = make(map[string][]chan struct{}) + } + _, ok := m.waiters[t] + if !ok { + m.waiters[t] = make([]chan struct{}, 0) + } + m.waiters[t] = append(m.waiters[t], waiter) +} diff --git a/vendor/github.com/docker/docker/libcontainerd/process.go b/vendor/github.com/docker/docker/libcontainerd/process.go new file mode 100644 index 00000000..57562c87 --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/process.go @@ -0,0 +1,18 @@ +package libcontainerd + +// processCommon are the platform common fields as part of the process structure +// which keeps the state for the main container process, as well as any exec +// processes. +type processCommon struct { + client *client + + // containerID is the Container ID + containerID string + + // friendlyName is an identifier for the process (or `InitFriendlyName` + // for the first process) + friendlyName string + + // systemPid is the PID of the main container process + systemPid uint32 +} diff --git a/vendor/github.com/docker/docker/libcontainerd/process_linux.go b/vendor/github.com/docker/docker/libcontainerd/process_linux.go new file mode 100644 index 00000000..3c48576f --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/process_linux.go @@ -0,0 +1,110 @@ +package libcontainerd + +import ( + "fmt" + "io" + "os" + "path/filepath" + "syscall" + + containerd "github.com/docker/containerd/api/grpc/types" + "github.com/docker/docker/pkg/ioutils" + "golang.org/x/net/context" +) + +var fdNames = map[int]string{ + syscall.Stdin: "stdin", + syscall.Stdout: "stdout", + syscall.Stderr: "stderr", +} + +// process keeps the state for both main container process and exec process. 
+type process struct { + processCommon + + // Platform specific fields are below here. + dir string +} + +func (p *process) openFifos(terminal bool) (*IOPipe, error) { + bundleDir := p.dir + if err := os.MkdirAll(bundleDir, 0700); err != nil { + return nil, err + } + + for i := 0; i < 3; i++ { + f := p.fifo(i) + if err := syscall.Mkfifo(f, 0700); err != nil && !os.IsExist(err) { + return nil, fmt.Errorf("mkfifo: %s %v", f, err) + } + } + + io := &IOPipe{} + stdinf, err := os.OpenFile(p.fifo(syscall.Stdin), syscall.O_RDWR, 0) + if err != nil { + return nil, err + } + + io.Stdout = openReaderFromFifo(p.fifo(syscall.Stdout)) + if !terminal { + io.Stderr = openReaderFromFifo(p.fifo(syscall.Stderr)) + } else { + io.Stderr = emptyReader{} + } + + io.Stdin = ioutils.NewWriteCloserWrapper(stdinf, func() error { + stdinf.Close() + _, err := p.client.remote.apiClient.UpdateProcess(context.Background(), &containerd.UpdateProcessRequest{ + Id: p.containerID, + Pid: p.friendlyName, + CloseStdin: true, + }) + return err + }) + + return io, nil +} + +func (p *process) closeFifos(io *IOPipe) { + io.Stdin.Close() + closeReaderFifo(p.fifo(syscall.Stdout)) + closeReaderFifo(p.fifo(syscall.Stderr)) +} + +type emptyReader struct{} + +func (r emptyReader) Read(b []byte) (int, error) { + return 0, io.EOF +} + +func openReaderFromFifo(fn string) io.Reader { + r, w := io.Pipe() + c := make(chan struct{}) + go func() { + close(c) + stdoutf, err := os.OpenFile(fn, syscall.O_RDONLY, 0) + if err != nil { + r.CloseWithError(err) + } + if _, err := io.Copy(w, stdoutf); err != nil { + r.CloseWithError(err) + } + w.Close() + stdoutf.Close() + }() + <-c // wait for the goroutine to get scheduled and syscall to block + return r +} + +// closeReaderFifo closes fifo that may be blocked on open by opening the write side. +func closeReaderFifo(fn string) { + f, err := os.OpenFile(fn, syscall.O_WRONLY|syscall.O_NONBLOCK, 0) + if err != nil { + return + } + f.Close() +} + +func (p *process) fifo(index int) string { + return filepath.Join(p.dir, p.friendlyName+"-"+fdNames[index]) +} diff --git a/vendor/github.com/docker/docker/libcontainerd/queue_linux.go b/vendor/github.com/docker/docker/libcontainerd/queue_linux.go new file mode 100644 index 00000000..34bc81d2 --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/queue_linux.go @@ -0,0 +1,29 @@ +package libcontainerd + +import "sync" + +type queue struct { + sync.Mutex + fns map[string]chan struct{} +} + +func (q *queue) append(id string, f func()) { + q.Lock() + defer q.Unlock() + + if q.fns == nil { + q.fns = make(map[string]chan struct{}) + } + + done := make(chan struct{}) + + fn, ok := q.fns[id] + q.fns[id] = done + go func() { + if ok { + <-fn + } + f() + close(done) + }() +} diff --git a/vendor/github.com/docker/docker/libcontainerd/remote.go b/vendor/github.com/docker/docker/libcontainerd/remote.go new file mode 100644 index 00000000..a679edcf --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/remote.go @@ -0,0 +1,18 @@ +package libcontainerd + +// Remote on Linux defines the accesspoint to the containerd grpc API. +// Remote on Windows is largely an unimplemented interface as there is +// no remote containerd. +type Remote interface { + // Client returns a new Client instance connected with given Backend. + Client(Backend) (Client, error) + // Cleanup stops containerd if it was started by libcontainerd. + // Note this is not used on Windows as there is no remote containerd. 
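// Illustrative note (not part of the vendored code): the fifo helpers above
// rely on Unix fifo open semantics. Opening the read side blocks until a
// writer appears, which is why openReaderFromFifo copies in a goroutine
// feeding an io.Pipe, and closeReaderFifo unblocks such a pending open by
// briefly opening the write side:
//
//	// open the write side; O_NONBLOCK errors out instead of blocking
//	// when no reader exists
//	f, err := os.OpenFile(fn, syscall.O_WRONLY|syscall.O_NONBLOCK, 0)
//	if err == nil {
//		f.Close() // the pending reader open completes and reads EOF
//	}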
+	Cleanup()
+}
+
+// RemoteOption allows configuring parameters of remotes.
+// This is unused on Windows.
+type RemoteOption interface {
+	Apply(Remote) error
+}
diff --git a/vendor/github.com/docker/docker/libcontainerd/remote_linux.go b/vendor/github.com/docker/docker/libcontainerd/remote_linux.go
new file mode 100644
index 00000000..fa339ce4
--- /dev/null
+++ b/vendor/github.com/docker/docker/libcontainerd/remote_linux.go
@@ -0,0 +1,290 @@
+package libcontainerd
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+	"sync"
+	"syscall"
+	"time"
+
+	"github.com/Sirupsen/logrus"
+	containerd "github.com/docker/containerd/api/grpc/types"
+	"github.com/docker/docker/pkg/locker"
+	sysinfo "github.com/docker/docker/pkg/system"
+	"golang.org/x/net/context"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/transport"
+)
+
+const (
+	maxConnectionRetryCount   = 3
+	connectionRetryDelay      = 3 * time.Second
+	containerdShutdownTimeout = 15 * time.Second
+	containerdBinary          = "docker-containerd"
+	containerdPidFilename     = "docker-containerd.pid"
+	containerdSockFilename    = "docker-containerd.sock"
+	eventTimestampFilename    = "event.ts"
+)
+
+type remote struct {
+	sync.RWMutex
+	apiClient     containerd.APIClient
+	daemonPid     int
+	stateDir      string
+	rpcAddr       string
+	startDaemon   bool
+	closeManually bool
+	debugLog      bool
+	rpcConn       *grpc.ClientConn
+	clients       []*client
+	eventTsPath   string
+	pastEvents    map[string]*containerd.Event
+	runtimeArgs   []string
+}
+
+// New creates a fresh instance of libcontainerd remote.
+func New(stateDir string, options ...RemoteOption) (_ Remote, err error) {
+	defer func() {
+		if err != nil {
+			err = fmt.Errorf("Failed to connect to containerd. Please make sure containerd is installed in your PATH or you have specified the correct address. Got error: %v", err)
+		}
+	}()
+	r := &remote{
+		stateDir:    stateDir,
+		daemonPid:   -1,
+		eventTsPath: filepath.Join(stateDir, eventTimestampFilename),
+		pastEvents:  make(map[string]*containerd.Event),
+	}
+	for _, option := range options {
+		if err := option.Apply(r); err != nil {
+			return nil, err
+		}
+	}
+
+	if err := sysinfo.MkdirAll(stateDir, 0700); err != nil {
+		return nil, err
+	}
+
+	if r.rpcAddr == "" {
+		r.rpcAddr = filepath.Join(stateDir, containerdSockFilename)
+	}
+
+	if r.startDaemon {
+		if err := r.runContainerdDaemon(); err != nil {
+			return nil, err
+		}
+	}
+
+	if err := r.startEventsMonitor(); err != nil {
+		return nil, err
+	}
+
+	return r, nil
+}
+
+func (r *remote) Cleanup() {
+}
+
+func (r *remote) Client(b Backend) (Client, error) {
+	c := &client{
+		clientCommon: clientCommon{
+			backend:    b,
+			containers: make(map[string]*container),
+			locker:     locker.New(),
+		},
+		remote:        r,
+		exitNotifiers: make(map[string]*exitNotifier),
+	}
+
+	r.Lock()
+	r.clients = append(r.clients, c)
+	r.Unlock()
+	return c, nil
+}
+
+func (r *remote) updateEventTimestamp(t time.Time) {
+	f, err := os.OpenFile(r.eventTsPath, syscall.O_CREAT|syscall.O_WRONLY|syscall.O_TRUNC, 0600)
+	if err != nil {
+		logrus.Warnf("libcontainerd: failed to open event timestamp file: %v", err)
+		return
+	}
+	defer f.Close()
+
+	b, err := t.MarshalText()
+	if err != nil {
+		logrus.Warnf("libcontainerd: failed to encode timestamp: %v", err)
+		return
+	}
+
+	n, err := f.Write(b)
+	if err != nil || n != len(b) {
+		logrus.Warnf("libcontainerd: failed to update event timestamp file: %v", err)
+		f.Truncate(0)
+		return
+	}
+}
+
+func (r *remote) getLastEventTimestamp() int64 {
+	t := time.Now()
+
+	fi, err := os.Stat(r.eventTsPath)
+	if err != nil || fi.Size() == 0 {
+		return t.Unix()
+	}
+
+	f, err := os.Open(r.eventTsPath)
+	if err != nil {
+		logrus.Warnf("libcontainerd: Unable to access last event ts: %v", err)
+		return t.Unix()
+	}
+	defer f.Close()
+
+	b := make([]byte, fi.Size())
+	n, err := f.Read(b)
+	if err != nil || n != len(b) {
+		logrus.Warnf("libcontainerd: Unable to read last event ts: %v", err)
+		return t.Unix()
+	}
+
+	t.UnmarshalText(b)
+
+	return t.Unix()
+}
+
+func (r *remote) startEventsMonitor() error {
+	// First, get past events
+	er := &containerd.EventsRequest{
+		Timestamp: uint64(r.getLastEventTimestamp()),
+	}
+	events, err := r.apiClient.Events(context.Background(), er)
+	if err != nil {
+		return err
+	}
+	go r.handleEventStream(events)
+	return nil
+}
+
+func (r *remote) handleEventStream(events containerd.API_EventsClient) {
+	live := false
+	for {
+		e, err := events.Recv()
+		if err != nil {
+			if grpc.ErrorDesc(err) == transport.ErrConnClosing.Desc &&
+				r.closeManually {
+				// ignore error if grpc remote connection is closed manually
+				return
+			}
+			logrus.Errorf("failed to receive event from containerd: %v", err)
+			go r.startEventsMonitor()
+			return
+		}
+
+		if !live {
+			logrus.Debugf("received past containerd event: %#v", e)
+
+			// Pause/Resume events should never happen after an Exit one
+			switch e.Type {
+			case StateExit, StatePause, StateResume:
+				r.pastEvents[e.Id] = e
+			case stateLive:
+				live = true
+				r.updateEventTimestamp(time.Unix(int64(e.Timestamp), 0))
+			}
+		} else {
+			logrus.Debugf("received containerd event: %#v", e)
+
+			var container *container
+			var c *client
+			r.RLock()
+			for _, c = range r.clients {
+				container, err = c.getContainer(e.Id)
+				if err == nil {
+					break
+				}
+			}
+			r.RUnlock()
+			if container == nil {
+				logrus.Errorf("no state for container: %q", err)
+				continue
+			}
+
+			if err := container.handleEvent(e); err != nil {
+				logrus.Errorf("error processing state change for %s: %v", e.Id, err)
+			}
+
+			r.updateEventTimestamp(time.Unix(int64(e.Timestamp), 0))
+		}
+	}
+}
+
+func (r *remote) runContainerdDaemon() error {
+	var err error
+	r.apiClient, err = newBridge(stateDir, 10, "docker-runc", r.runtimeArgs)
+	return err
+}
+
+// WithRemoteAddr sets the external containerd socket to connect to.
+func WithRemoteAddr(addr string) RemoteOption {
+	return rpcAddr(addr)
+}
+
+type rpcAddr string
+
+func (a rpcAddr) Apply(r Remote) error {
+	if remote, ok := r.(*remote); ok {
+		remote.rpcAddr = string(a)
+		return nil
+	}
+	return fmt.Errorf("WithRemoteAddr option not supported for this remote")
+}
+
+// WithRuntimeArgs sets the list of runtime args passed to containerd
+func WithRuntimeArgs(args []string) RemoteOption {
+	return runtimeArgs(args)
+}
+
+type runtimeArgs []string
+
+func (rt runtimeArgs) Apply(r Remote) error {
+	if remote, ok := r.(*remote); ok {
+		remote.runtimeArgs = rt
+		return nil
+	}
+	return fmt.Errorf("WithRuntimeArgs option not supported for this remote")
+}
+
+// WithStartDaemon defines if libcontainerd should also run containerd daemon.
+func WithStartDaemon(start bool) RemoteOption {
+	return startDaemon(start)
+}
+
+type startDaemon bool
+
+func (s startDaemon) Apply(r Remote) error {
+	if remote, ok := r.(*remote); ok {
+		remote.startDaemon = bool(s)
+		return nil
+	}
+	return fmt.Errorf("WithStartDaemon option not supported for this remote")
+}
+
+// WithDebugLog defines if containerd debug logs will be enabled for daemon.
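// Illustrative sketch (not part of the vendored code): a daemon would combine
// these options when creating the remote (the state dir path and the runtime
// flag are hypothetical):
//
//	r, err := libcontainerd.New("/var/run/docker/libcontainerd",
//		libcontainerd.WithStartDaemon(true),
//		libcontainerd.WithRuntimeArgs([]string{"--some-runtime-flag"}),
//	)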
+func WithDebugLog(debug bool) RemoteOption { + return debugLog(debug) +} + +type debugLog bool + +func (d debugLog) Apply(r Remote) error { + if remote, ok := r.(*remote); ok { + remote.debugLog = bool(d) + return nil + } + return fmt.Errorf("WithDebugLog option not supported for this remote") +} diff --git a/vendor/github.com/docker/docker/libcontainerd/rpc_bridge.go b/vendor/github.com/docker/docker/libcontainerd/rpc_bridge.go new file mode 100644 index 00000000..d745298a --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/rpc_bridge.go @@ -0,0 +1,48 @@ +package libcontainerd + +import ( + "sync" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/containerd/api/grpc/server" + "github.com/docker/containerd/api/grpc/types" + "github.com/docker/containerd/subreaper" + "github.com/docker/containerd/supervisor" +) + +var ( + stateDir = "/run/containerd" +) + +type bridge struct { + s types.APIServer +} + +func newBridge(stateDir string, concurrency int, runtimeName string, runtimeArgs []string) (types.APIClient, error) { + s, err := daemon(stateDir, concurrency, runtimeName, runtimeArgs) + if err != nil { + return nil, err + } + return &bridge{s: s}, nil +} + +func daemon(stateDir string, concurrency int, runtimeName string, runtimeArgs []string) (types.APIServer, error) { + if err := subreaper.Start(); err != nil { + logrus.WithField("error", err).Error("containerd: start subreaper") + } + sv, err := supervisor.New(stateDir, runtimeName, "", runtimeArgs, 15*time.Second, 500) + if err != nil { + return nil, err + } + wg := &sync.WaitGroup{} + for i := 0; i < concurrency; i++ { + wg.Add(1) + w := supervisor.NewWorker(sv, wg) + go w.Start() + } + if err := sv.Start(); err != nil { + return nil, err + } + return server.NewServer(sv), nil +} diff --git a/vendor/github.com/docker/docker/libcontainerd/rpc_bridge_wrapper.go b/vendor/github.com/docker/docker/libcontainerd/rpc_bridge_wrapper.go new file mode 100644 index 00000000..0000bbd6 --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/rpc_bridge_wrapper.go @@ -0,0 +1,131 @@ +package libcontainerd + +import ( + "io" + + "github.com/docker/containerd/api/grpc/types" + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/metadata" +) + +func (b *bridge) CreateContainer(ctx context.Context, in *types.CreateContainerRequest, opts ...grpc.CallOption) (*types.CreateContainerResponse, error) { + return b.s.CreateContainer(ctx, in) +} + +func (b *bridge) UpdateContainer(ctx context.Context, in *types.UpdateContainerRequest, opts ...grpc.CallOption) (*types.UpdateContainerResponse, error) { + return b.s.UpdateContainer(ctx, in) +} + +func (b *bridge) Signal(ctx context.Context, in *types.SignalRequest, opts ...grpc.CallOption) (*types.SignalResponse, error) { + return b.s.Signal(ctx, in) +} + +func (b *bridge) UpdateProcess(ctx context.Context, in *types.UpdateProcessRequest, opts ...grpc.CallOption) (*types.UpdateProcessResponse, error) { + return b.s.UpdateProcess(ctx, in) +} + +func (b *bridge) AddProcess(ctx context.Context, in *types.AddProcessRequest, opts ...grpc.CallOption) (*types.AddProcessResponse, error) { + return b.s.AddProcess(ctx, in) +} + +func (b *bridge) CreateCheckpoint(ctx context.Context, in *types.CreateCheckpointRequest, opts ...grpc.CallOption) (*types.CreateCheckpointResponse, error) { + return b.s.CreateCheckpoint(ctx, in) +} + +func (b *bridge) DeleteCheckpoint(ctx context.Context, in *types.DeleteCheckpointRequest, opts ...grpc.CallOption) 
(*types.DeleteCheckpointResponse, error) { + return b.s.DeleteCheckpoint(ctx, in) +} + +func (b *bridge) ListCheckpoint(ctx context.Context, in *types.ListCheckpointRequest, opts ...grpc.CallOption) (*types.ListCheckpointResponse, error) { + return b.s.ListCheckpoint(ctx, in) +} + +func (b *bridge) State(ctx context.Context, in *types.StateRequest, opts ...grpc.CallOption) (*types.StateResponse, error) { + return b.s.State(ctx, in) +} + +func (b *bridge) GetServerVersion(ctx context.Context, in *types.GetServerVersionRequest, opts ...grpc.CallOption) (*types.GetServerVersionResponse, error) { + return b.s.GetServerVersion(ctx, in) +} + +func (b *bridge) Events(ctx context.Context, in *types.EventsRequest, opts ...grpc.CallOption) (types.API_EventsClient, error) { + c := make(chan *types.Event, 1024) + client := &aPI_EventsClient{ + c: c, + } + client.ctx = ctx + server := &aPI_EventsServer{ + c: c, + } + server.ctx = ctx + go b.s.Events(in, server) + return client, nil +} + +func (b *bridge) Stats(ctx context.Context, in *types.StatsRequest, opts ...grpc.CallOption) (*types.StatsResponse, error) { + return b.s.Stats(ctx, in) +} + +type aPI_EventsServer struct { + serverStream + c chan *types.Event +} + +func (a *aPI_EventsServer) Send(msg *types.Event) error { + a.c <- msg + return nil +} + +type serverStream struct { + stream +} + +func (s *serverStream) SendHeader(metadata.MD) error { + return nil +} + +func (s *serverStream) SetTrailer(metadata.MD) { +} + +type aPI_EventsClient struct { + clientStream + c chan *types.Event +} + +func (a *aPI_EventsClient) Recv() (*types.Event, error) { + e, ok := <-a.c + if !ok { + return nil, io.EOF + } + return e, nil +} + +type stream struct { + ctx context.Context +} + +func (s *stream) Context() context.Context { + return s.ctx +} +func (s *stream) SendMsg(m interface{}) error { + return nil +} +func (s *stream) RecvMsg(m interface{}) error { + return nil +} + +// clientStream defines the interface a client stream has to satisfy. +type clientStream struct { + stream +} + +func (c *clientStream) Header() (metadata.MD, error) { + return nil, nil +} +func (c *clientStream) Trailer() metadata.MD { + return nil +} +func (c *clientStream) CloseSend() error { + return nil +} diff --git a/vendor/github.com/docker/docker/libcontainerd/types.go b/vendor/github.com/docker/docker/libcontainerd/types.go new file mode 100644 index 00000000..bb82fe40 --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/types.go @@ -0,0 +1,60 @@ +package libcontainerd + +import "io" + +// State constants used in state change reporting. +const ( + StateStart = "start-container" + StatePause = "pause" + StateResume = "resume" + StateExit = "exit" + StateRestart = "restart" + StateRestore = "restore" + StateStartProcess = "start-process" + StateExitProcess = "exit-process" + StateOOM = "oom" // fake state + stateLive = "live" +) + +// StateInfo describes the new state the container has entered. +type StateInfo struct { // FIXME: event? + State string + Pid uint32 + ExitCode uint32 + ProcessID string + OOMKilled bool // TODO Windows containerd factor out +} + +// Backend defines callbacks that the client of the library needs to implement. +type Backend interface { + StateChanged(containerID string, state StateInfo) error + AttachStreams(processFriendlyName string, io IOPipe) error +}
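The unary methods above are pure delegation; the interesting part is Events, which fakes a gRPC stream in-process: the server half's Send and the client half's Recv share one channel, and a closed channel is translated into io.EOF, the sentinel stream consumers expect. A stripped-down sketch of that channel bridge (event and the two halves are illustrative types, not the vendored ones):

```go
package main

import (
	"fmt"
	"io"
)

// event stands in for types.Event.
type event struct{ ID string }

// eventsClient mimics aPI_EventsClient: Recv drains a shared channel.
type eventsClient struct{ c chan *event }

func (e *eventsClient) Recv() (*event, error) {
	ev, ok := <-e.c
	if !ok {
		return nil, io.EOF // closed channel maps to end-of-stream
	}
	return ev, nil
}

// eventsServer mimics aPI_EventsServer: Send feeds the same channel.
type eventsServer struct{ c chan *event }

func (e *eventsServer) Send(ev *event) error {
	e.c <- ev
	return nil
}

func main() {
	c := make(chan *event, 2)
	srv, cli := &eventsServer{c}, &eventsClient{c}
	srv.Send(&event{ID: "42"})
	close(c) // server done; client sees io.EOF after draining
	for {
		ev, err := cli.Recv()
		if err == io.EOF {
			break
		}
		fmt.Println(ev.ID)
	}
}
```

+// Client provides access to containerd features.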
+type Client interface { + Create(containerID string, spec Spec, options ...CreateOption) error + Signal(containerID string, sig int) error + AddProcess(containerID, processFriendlyName string, process Process) error + Resize(containerID, processFriendlyName string, width, height int) error + Pause(containerID string) error + Resume(containerID string) error + Restore(containerID string, options ...CreateOption) error + Stats(containerID string) (*Stats, error) + GetPidsForContainer(containerID string) ([]int, error) + Summary(containerID string) ([]Summary, error) + UpdateResources(containerID string, resources Resources) error +} + +// CreateOption allows configuring parameters of container creation. +type CreateOption interface { + Apply(interface{}) error +} + +// IOPipe contains the stdio streams. +type IOPipe struct { + Stdin io.WriteCloser + Stdout io.Reader + Stderr io.Reader + Terminal bool // Whether stderr is connected on Windows +} diff --git a/vendor/github.com/docker/docker/libcontainerd/types_linux.go b/vendor/github.com/docker/docker/libcontainerd/types_linux.go new file mode 100644 index 00000000..bee12d91 --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/types_linux.go @@ -0,0 +1,47 @@ +package libcontainerd + +import ( + containerd "github.com/docker/containerd/api/grpc/types" + "github.com/opencontainers/specs/specs-go" +) + +// Spec is the base configuration for the container. It specifies platform +// independent configuration. This information must be included when the +// bundle is packaged for distribution. +type Spec specs.Spec + +// Process contains information to start a specific application inside the container. +type Process struct { + // Terminal creates an interactive terminal for the container. + Terminal bool `json:"terminal"` + // User specifies user information for the process. + User *User `json:"user"` + // Args specifies the binary and arguments for the application to execute. + Args []string `json:"args"` + // Env populates the process environment for the process. + Env []string `json:"env,omitempty"` + // Cwd is the current working directory for the process and must be + // relative to the container's root. + Cwd *string `json:"cwd"` + // Capabilities are linux capabilities that are kept for the container. + Capabilities []string `json:"capabilities,omitempty"` + // Rlimits specifies rlimit options to apply to the process. + Rlimits []specs.Rlimit `json:"rlimits,omitempty"` + // ApparmorProfile specifies the apparmor profile for the container. + ApparmorProfile *string `json:"apparmorProfile,omitempty"` + // SelinuxLabel specifies the selinux context that the container process is run as. + SelinuxLabel *string `json:"selinuxLabel,omitempty"` +} + +// Stats contains stats properties from containerd. +type Stats containerd.StatsResponse + +// Summary contains a container summary from containerd. +type Summary struct{} + +// User specifies linux specific user and group information for the container's +// main process. +type User specs.User
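Backend is the callback surface the daemon implements: libcontainerd pushes a StateInfo (keyed by the State constants above) at every container transition. A toy sketch of the idea, using illustrative local types rather than the vendored ones, assuming the daemon would update its container store on each call:

```go
package main

import "fmt"

// stateInfo mirrors a subset of the StateInfo fields above.
type stateInfo struct {
	State    string
	Pid      uint32
	ExitCode uint32
}

// logBackend is a toy stand-in for a Backend implementation:
// a real backend would update its container store here.
type logBackend struct{}

func (logBackend) StateChanged(containerID string, st stateInfo) error {
	fmt.Printf("container %s -> %s (pid=%d, exit=%d)\n",
		containerID, st.State, st.Pid, st.ExitCode)
	return nil
}

func main() {
	b := logBackend{}
	b.StateChanged("abc123", stateInfo{State: "start-container", Pid: 4242})
	b.StateChanged("abc123", stateInfo{State: "exit", ExitCode: 137})
}
```

+// Resources defines updatable container resource values.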
+type Resources containerd.UpdateResource diff --git a/vendor/github.com/docker/docker/libcontainerd/utils_linux.go b/vendor/github.com/docker/docker/libcontainerd/utils_linux.go new file mode 100644 index 00000000..5b67244f --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/utils_linux.go @@ -0,0 +1,52 @@ +package libcontainerd + +import ( + containerd "github.com/docker/containerd/api/grpc/types" + "github.com/opencontainers/specs/specs-go" +) + +func getRootIDs(s specs.Spec) (int, int, error) { + var hasUserns bool + for _, ns := range s.Linux.Namespaces { + if ns.Type == specs.UserNamespace { + hasUserns = true + break + } + } + if !hasUserns { + return 0, 0, nil + } + uid := hostIDFromMap(0, s.Linux.UIDMappings) + gid := hostIDFromMap(0, s.Linux.GIDMappings) + return uid, gid, nil +} + +func hostIDFromMap(id uint32, mp []specs.IDMapping) int { + for _, m := range mp { + if id >= m.ContainerID && id <= m.ContainerID+m.Size-1 { + return int(m.HostID + id - m.ContainerID) + } + } + return 0 +} + +func systemPid(ctr *containerd.Container) uint32 { + var pid uint32 + for _, p := range ctr.Processes { + if p.Pid == InitFriendlyName { + pid = p.SystemPid + } + } + return pid +} + +func convertRlimits(sr []specs.Rlimit) (cr []*containerd.Rlimit) { + for _, r := range sr { + cr = append(cr, &containerd.Rlimit{ + Type: r.Type, + Hard: r.Hard, + Soft: r.Soft, + }) + } + return +} diff --git a/vendor/github.com/docker/docker/oci/defaults_linux.go b/vendor/github.com/docker/docker/oci/defaults_linux.go new file mode 100644 index 00000000..9ee75461 --- /dev/null +++ b/vendor/github.com/docker/docker/oci/defaults_linux.go @@ -0,0 +1,210 @@ +package oci + +import ( + "os" + "runtime" + + "github.com/opencontainers/specs/specs-go" +) + +func sPtr(s string) *string { return &s } +func rPtr(r rune) *rune { return &r } +func iPtr(i int64) *int64 { return &i } +func u32Ptr(i int64) *uint32 { u := uint32(i); return &u } +func fmPtr(i int64) *os.FileMode { fm := os.FileMode(i); return &fm }
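hostIDFromMap above does the user-namespace remapping arithmetic: a container ID that falls inside [ContainerID, ContainerID+Size) maps to HostID + (id - ContainerID), and 0 is returned when no mapping matches. For instance, with the common mapping {ContainerID: 0, HostID: 100000, Size: 65536}, container root (0) lands on host UID 100000. A standalone sketch of the same arithmetic (the idMapping type is a local stand-in for specs.IDMapping):

```go
package main

import "fmt"

type idMapping struct{ ContainerID, HostID, Size uint32 }

// hostIDFromMap mirrors the vendored helper: remap a container ID
// through the first matching range, or return 0 if none matches.
func hostIDFromMap(id uint32, mp []idMapping) int {
	for _, m := range mp {
		if id >= m.ContainerID && id <= m.ContainerID+m.Size-1 {
			return int(m.HostID + id - m.ContainerID)
		}
	}
	return 0 // no mapping found
}

func main() {
	maps := []idMapping{{ContainerID: 0, HostID: 100000, Size: 65536}}
	fmt.Println(hostIDFromMap(0, maps))    // 100000
	fmt.Println(hostIDFromMap(1000, maps)) // 101000
}
```

+// DefaultSpec returns the default OCI spec used by Docker.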
+func DefaultSpec() specs.Spec { + s := specs.Spec{ + Version: specs.Version, + Platform: specs.Platform{ + OS: runtime.GOOS, + Arch: runtime.GOARCH, + }, + } + s.Mounts = []specs.Mount{ + { + Destination: "/proc", + Type: "proc", + Source: "proc", + Options: []string{"nosuid", "noexec", "nodev"}, + }, + { + Destination: "/dev", + Type: "tmpfs", + Source: "tmpfs", + Options: []string{"nosuid", "strictatime", "mode=755"}, + }, + { + Destination: "/dev/pts", + Type: "devpts", + Source: "devpts", + Options: []string{"nosuid", "noexec", "newinstance", "ptmxmode=0666", "mode=0620", "gid=5"}, + }, + { + Destination: "/sys", + Type: "sysfs", + Source: "sysfs", + Options: []string{"nosuid", "noexec", "nodev", "ro"}, + }, + { + Destination: "/sys/fs/cgroup", + Type: "cgroup", + Source: "cgroup", + Options: []string{"ro", "nosuid", "noexec", "nodev"}, + }, + { + Destination: "/dev/mqueue", + Type: "mqueue", + Source: "mqueue", + Options: []string{"nosuid", "noexec", "nodev"}, + }, + } + + s.Process.Capabilities = []string{ + "CAP_CHOWN", + "CAP_DAC_OVERRIDE", + "CAP_FSETID", + "CAP_FOWNER", + "CAP_MKNOD", + "CAP_NET_RAW", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETFCAP", + "CAP_SETPCAP", + "CAP_NET_BIND_SERVICE", + "CAP_SYS_CHROOT", + "CAP_KILL", + "CAP_AUDIT_WRITE", + } + + s.Linux = specs.Linux{ + MaskedPaths: []string{ + "/proc/kcore", + "/proc/latency_stats", + "/proc/timer_stats", + "/proc/sched_debug", + }, + ReadonlyPaths: []string{ + "/proc/asound", + "/proc/bus", + "/proc/fs", + "/proc/irq", + "/proc/sys", + "/proc/sysrq-trigger", + }, + Namespaces: []specs.Namespace{ + {Type: "mount"}, + {Type: "network"}, + {Type: "uts"}, + {Type: "pid"}, + {Type: "ipc"}, + }, + Devices: []specs.Device{ + { + Type: "c", + Path: "/dev/zero", + Major: 1, + Minor: 5, + FileMode: fmPtr(0666), + UID: u32Ptr(0), + GID: u32Ptr(0), + }, + { + Type: "c", + Path: "/dev/null", + Major: 1, + Minor: 3, + FileMode: fmPtr(0666), + UID: u32Ptr(0), + GID: u32Ptr(0), + }, + { + Type: "c", + Path: "/dev/urandom", + Major: 1, + Minor: 9, + FileMode: fmPtr(0666), + UID: u32Ptr(0), + GID: u32Ptr(0), + }, + { + Type: "c", + Path: "/dev/random", + Major: 1, + Minor: 8, + FileMode: fmPtr(0666), + UID: u32Ptr(0), + GID: u32Ptr(0), + }, + { + Type: "c", + Path: "/dev/fuse", + Major: 10, + Minor: 229, + FileMode: fmPtr(0666), + UID: u32Ptr(0), + GID: u32Ptr(0), + }, + }, + Resources: &specs.Resources{ + Devices: []specs.DeviceCgroup{ + { + Allow: false, + Access: sPtr("rwm"), + }, + { + Allow: true, + Type: sPtr("c"), + Major: iPtr(1), + Minor: iPtr(5), + Access: sPtr("rwm"), + }, + { + Allow: true, + Type: sPtr("c"), + Major: iPtr(1), + Minor: iPtr(3), + Access: sPtr("rwm"), + }, + { + Allow: true, + Type: sPtr("c"), + Major: iPtr(1), + Minor: iPtr(9), + Access: sPtr("rwm"), + }, + { + Allow: true, + Type: sPtr("c"), + Major: iPtr(1), + Minor: iPtr(8), + Access: sPtr("rwm"), + }, + { + Allow: true, + Type: sPtr("c"), + Major: iPtr(5), + Minor: iPtr(0), + Access: sPtr("rwm"), + }, + { + Allow: true, + Type: sPtr("c"), + Major: iPtr(5), + Minor: iPtr(1), + Access: sPtr("rwm"), + }, + { + Allow: false, + Type: sPtr("c"), + Major: iPtr(10), + Minor: iPtr(229), + Access: sPtr("rwm"), + }, + }, + }, + } + + return s +} diff --git a/vendor/github.com/docker/docker/pkg/aaparser/aaparser.go b/vendor/github.com/docker/docker/pkg/aaparser/aaparser.go new file mode 100644 index 00000000..6af771ff --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/aaparser/aaparser.go @@ -0,0 +1,92 @@ +// Package aaparser is a convenience package 
for interacting with `apparmor_parser`. +package aaparser + +import ( + "fmt" + "path/filepath" + "strconv" + "strings" + + "github.com/docker/containerd/subreaper/exec" +) + +const ( + binary = "apparmor_parser" +) + +// GetVersion returns the version of apparmor_parser, packed into a single +// integer by parseVersion. +func GetVersion() (int, error) { + output, err := cmd("", "--version") + if err != nil { + return -1, err + } + + return parseVersion(output) +} + +// LoadProfile runs `apparmor_parser -r -W` on a specified apparmor profile to +// replace and write it to disk. +func LoadProfile(profilePath string) error { + _, err := cmd(filepath.Dir(profilePath), "-r", "-W", filepath.Base(profilePath)) + if err != nil { + return err + } + return nil +} + +// cmd runs `apparmor_parser` with the passed arguments. +func cmd(dir string, arg ...string) (string, error) { + c := exec.Command(binary, arg...) + c.Dir = dir + + output, err := c.CombinedOutput() + if err != nil { + return "", fmt.Errorf("running `%s %s` failed with output: %s\nerror: %v", c.Path, strings.Join(c.Args, " "), string(output), err) + } + + return string(output), nil +} + +// parseVersion takes the output from `apparmor_parser --version` and returns +// a representation of the {major, minor, patch} version as a single number of +// the form MMmmPPP. +func parseVersion(output string) (int, error) { + // output is in the form of the following: + // AppArmor parser version 2.9.1 + // Copyright (C) 1999-2008 Novell Inc. + // Copyright 2009-2012 Canonical Ltd. + + lines := strings.SplitN(output, "\n", 2) + words := strings.Split(lines[0], " ") + version := words[len(words)-1] + + // split by major minor version + v := strings.Split(version, ".") + if len(v) == 0 || len(v) > 3 { + return -1, fmt.Errorf("parsing version failed for output: `%s`", output) + }
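The MMmmPPP scheme packs {major, minor, patch} into one integer as major*10^5 + minor*10^3 + patch, so "2.9.1" becomes 209001 and versions compare correctly as plain ints (2.10.1 → 210001 > 209001, even though a string comparison would get it wrong). A quick standalone check of the encoding (encode is a local helper, not the vendored function):

```go
package main

import "fmt"

// encode mirrors the MMmmPPP scheme used by parseVersion.
func encode(major, minor, patch int) int {
	return major*1e5 + minor*1e3 + patch
}

func main() {
	v291 := encode(2, 9, 1)   // 209001
	v2101 := encode(2, 10, 1) // 210001
	fmt.Println(v291, v2101, v2101 > v291) // 209001 210001 true
}
```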
+ // Default the versions to 0. + var majorVersion, minorVersion, patchLevel int + + majorVersion, err := strconv.Atoi(v[0]) + if err != nil { + return -1, err + } + + if len(v) > 1 { + minorVersion, err = strconv.Atoi(v[1]) + if err != nil { + return -1, err + } + } + if len(v) > 2 { + patchLevel, err = strconv.Atoi(v[2]) + if err != nil { + return -1, err + } + } + + // major*10^5 + minor*10^3 + patch*10^0 + numericVersion := majorVersion*1e5 + minorVersion*1e3 + patchLevel + return numericVersion, nil +} diff --git a/vendor/github.com/docker/docker/pkg/authorization/api.go b/vendor/github.com/docker/docker/pkg/authorization/api.go new file mode 100644 index 00000000..fc82c46b --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/authorization/api.go @@ -0,0 +1,54 @@ +package authorization + +const ( + // AuthZApiRequest is the URL for daemon request authorization + AuthZApiRequest = "AuthZPlugin.AuthZReq" + + // AuthZApiResponse is the URL for daemon response authorization + AuthZApiResponse = "AuthZPlugin.AuthZRes" + + // AuthZApiImplements is the name of the interface all AuthZ plugins implement + AuthZApiImplements = "authz" +) + +// Request holds data required for authZ plugins +type Request struct { + // User holds the user extracted by the AuthN mechanism + User string `json:"User,omitempty"` + + // UserAuthNMethod holds the mechanism used to extract user details (e.g., krb) + UserAuthNMethod string `json:"UserAuthNMethod,omitempty"` + + // RequestMethod holds the HTTP method (GET/POST/PUT) + RequestMethod string `json:"RequestMethod,omitempty"` + + // RequestURI holds the full HTTP URI (e.g., /v1.21/version) + RequestURI string `json:"RequestUri,omitempty"` + + // RequestBody stores the raw request body sent to the docker daemon + RequestBody []byte `json:"RequestBody,omitempty"` + + // RequestHeaders stores the raw request headers sent to the docker daemon + RequestHeaders map[string]string `json:"RequestHeaders,omitempty"` + + // ResponseStatusCode stores the status code returned from docker daemon + ResponseStatusCode int `json:"ResponseStatusCode,omitempty"` + + // ResponseBody stores the raw response body sent from docker daemon + ResponseBody []byte `json:"ResponseBody,omitempty"` + + // ResponseHeaders stores the response headers sent to the docker daemon + ResponseHeaders map[string]string `json:"ResponseHeaders,omitempty"` +} + +// Response represents an authZ plugin response +type Response struct { + // Allow indicates whether the user is allowed or not + Allow bool `json:"Allow"` + + // Msg stores the authorization message + Msg string `json:"Msg,omitempty"` + + // Err stores a message in case there's an error + Err string `json:"Err,omitempty"` +} diff --git a/vendor/github.com/docker/docker/pkg/authorization/authz.go b/vendor/github.com/docker/docker/pkg/authorization/authz.go new file mode 100644 index 00000000..f7039086 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/authorization/authz.go @@ -0,0 +1,165 @@ +package authorization + +import ( + "bufio" + "bytes" + "fmt" + "io" + "net/http" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/ioutils" +) + +const maxBodySize = 1048576 // 1MB
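The Ctx defined just below is meant to bracket the daemon's HTTP handler: build one Ctx per request, call AuthZRequest before dispatch, serve the handler through a buffering ResponseModifier, then call AuthZResponse before anything reaches the client. A hedged sketch of that wiring, using the package's own NewCtx/NewResponseModifier; the handler, plugin list, and the "bob"/"certificate" identity strings are placeholders supplied by the caller:

```go
package main

import (
	"net/http"

	"github.com/docker/docker/pkg/authorization"
)

// withAuthZ wraps a daemon handler with plugin authorization.
func withAuthZ(plugins []authorization.Plugin, next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		ctx := authorization.NewCtx(plugins, "bob", "certificate", r.Method, r.RequestURI)
		if err := ctx.AuthZRequest(w, r); err != nil {
			http.Error(w, err.Error(), http.StatusForbidden)
			return
		}
		rm := authorization.NewResponseModifier(w)
		next.ServeHTTP(rm, r) // response is buffered by the modifier
		if err := ctx.AuthZResponse(rm, r); err != nil {
			http.Error(w, err.Error(), http.StatusForbidden)
		}
	})
}

func main() {
	next := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok"))
	})
	http.Handle("/", withAuthZ(authorization.NewPlugins(nil), next))
	// http.ListenAndServe(":2375", nil) // not started in this sketch
}
```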
+// NewCtx creates a new authZ context; it is used to store authorization information related to a specific Docker +// REST HTTP session. +// A context provides two methods: +// Authenticate Request: +// Call authZ plugins with the current REST request and AuthN response +// Request contains the full HTTP packet sent to the docker daemon +// https://docs.docker.com/reference/api/docker_remote_api/ +// +// Authenticate Response: +// Call authZ plugins with full info about the current REST request, REST response and AuthN response +// The response from this method may contain content that overrides the daemon response +// This allows authZ plugins to filter privileged content +// +// If multiple authZ plugins are specified, the block/allow decision is based on ANDing all plugin results +// For response manipulation, the response from each plugin is piped between plugins. Plugin execution order +// is determined according to daemon parameters +func NewCtx(authZPlugins []Plugin, user, userAuthNMethod, requestMethod, requestURI string) *Ctx { + return &Ctx{ + plugins: authZPlugins, + user: user, + userAuthNMethod: userAuthNMethod, + requestMethod: requestMethod, + requestURI: requestURI, + } +} + +// Ctx stores a single request-response interaction context +type Ctx struct { + user string + userAuthNMethod string + requestMethod string + requestURI string + plugins []Plugin + // authReq stores the cached request object for the current transaction + authReq *Request +} + +// AuthZRequest authorizes the request to the docker daemon using authZ plugins +func (ctx *Ctx) AuthZRequest(w http.ResponseWriter, r *http.Request) error { + var body []byte + if sendBody(ctx.requestURI, r.Header) && r.ContentLength > 0 && r.ContentLength < maxBodySize { + var err error + body, r.Body, err = drainBody(r.Body) + if err != nil { + return err + } + } + + var h bytes.Buffer + if err := r.Header.Write(&h); err != nil { + return err + } + + ctx.authReq = &Request{ + User: ctx.user, + UserAuthNMethod: ctx.userAuthNMethod, + RequestMethod: ctx.requestMethod, + RequestURI: ctx.requestURI, + RequestBody: body, + RequestHeaders: headers(r.Header), + } + + for _, plugin := range ctx.plugins { + logrus.Debugf("AuthZ request using plugin %s", plugin.Name()) + + authRes, err := plugin.AuthZRequest(ctx.authReq) + if err != nil { + return fmt.Errorf("plugin %s failed with error: %s", plugin.Name(), err) + } + + if !authRes.Allow { + return fmt.Errorf("authorization denied by plugin %s: %s", plugin.Name(), authRes.Msg) + } + } + + return nil +} + +// AuthZResponse authorizes and manipulates the response from the docker daemon using authZ plugins +func (ctx *Ctx) AuthZResponse(rm ResponseModifier, r *http.Request) error { + ctx.authReq.ResponseStatusCode = rm.StatusCode() + ctx.authReq.ResponseHeaders = headers(rm.Header()) + + if sendBody(ctx.requestURI, rm.Header()) { + ctx.authReq.ResponseBody = rm.RawBody() + } + + for _, plugin := range ctx.plugins { + logrus.Debugf("AuthZ response using plugin %s", plugin.Name()) + + authRes, err := plugin.AuthZResponse(ctx.authReq) + if err != nil { + return fmt.Errorf("plugin %s failed with error: %s", plugin.Name(), err) + } + + if !authRes.Allow { + return fmt.Errorf("authorization denied by plugin %s: %s", plugin.Name(), authRes.Msg) + } + } + + rm.FlushAll() + + return nil +} + +// drainBody dumps the body (if its length is less than 1MB) without modifying the request state +func drainBody(body io.ReadCloser) ([]byte, io.ReadCloser, error) { + bufReader := bufio.NewReaderSize(body, maxBodySize) + newBody := ioutils.NewReadCloserWrapper(bufReader, func() error { return body.Close() }) + + data, err := bufReader.Peek(maxBodySize) + // Body size exceeds max body size + if err == nil { + logrus.Warnf("Request body is larger than: '%d' skipping body", maxBodySize) + return nil, newBody, nil + } + // Body size is less than the maximum size
+ if err == io.EOF { + return data, newBody, nil + } + // Unknown error + return nil, newBody, err +} + +// sendBody returns true when request/response body should be sent to AuthZPlugin +func sendBody(url string, header http.Header) bool { + // Skip body for auth endpoint + if strings.HasSuffix(url, "/auth") { + return false + } + + // body is sent only for text or json messages + return header.Get("Content-Type") == "application/json" +} + +// headers returns a flattened version of the HTTP headers, excluding authorization +func headers(header http.Header) map[string]string { + v := make(map[string]string) + for k, values := range header { + // Skip authorization headers + if strings.EqualFold(k, "Authorization") || strings.EqualFold(k, "X-Registry-Config") || strings.EqualFold(k, "X-Registry-Auth") { + continue + } + for _, val := range values { + v[k] = val + } + } + return v +} diff --git a/vendor/github.com/docker/docker/pkg/authorization/plugin.go b/vendor/github.com/docker/docker/pkg/authorization/plugin.go new file mode 100644 index 00000000..1b65ac0a --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/authorization/plugin.go @@ -0,0 +1,83 @@ +package authorization + +import "github.com/docker/docker/pkg/plugins" + +// Plugin allows third party plugins to authorize requests and responses +// in the context of docker API +type Plugin interface { + // Name returns the registered plugin name + Name() string + + // AuthZRequest authorizes the request from the client to the daemon + AuthZRequest(*Request) (*Response, error) + + // AuthZResponse authorizes the response from the daemon to the client + AuthZResponse(*Request) (*Response, error) +} + +// NewPlugins constructs and initializes the authorization plugins based on plugin names +func NewPlugins(names []string) []Plugin { + plugins := []Plugin{} + pluginsMap := make(map[string]struct{}) + for _, name := range names { + if _, ok := pluginsMap[name]; ok { + continue + } + pluginsMap[name] = struct{}{} + plugins = append(plugins, newAuthorizationPlugin(name)) + } + return plugins +} + +// authorizationPlugin is an internal adapter to docker plugin system +type authorizationPlugin struct { + plugin *plugins.Plugin + name string +} + +func newAuthorizationPlugin(name string) Plugin { + return &authorizationPlugin{name: name} +} + +func (a *authorizationPlugin) Name() string { + return a.name +} + +func (a *authorizationPlugin) AuthZRequest(authReq *Request) (*Response, error) { + if err := a.initPlugin(); err != nil { + return nil, err + } + + authRes := &Response{} + if err := a.plugin.Client.Call(AuthZApiRequest, authReq, authRes); err != nil { + return nil, err + } + + return authRes, nil +} + +func (a *authorizationPlugin) AuthZResponse(authReq *Request) (*Response, error) { + if err := a.initPlugin(); err != nil { + return nil, err + } + + authRes := &Response{} + if err := a.plugin.Client.Call(AuthZApiResponse, authReq, authRes); err != nil { + return nil, err + } + + return authRes, nil +} + +// initPlugin initializes the authorization plugin if needed +func (a *authorizationPlugin) initPlugin() error { + // Lazy loading of plugins + if a.plugin == nil { + var err error + a.plugin, err = plugins.Get(a.name, AuthZApiImplements) + if err != nil { + return err + } + } + return nil +}
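Plugin is the seam third parties implement; NewPlugins deduplicates names and wraps each one in the lazy-loading adapter above, which only resolves the named plugin via plugins.Get on first use. A hedged sketch of a static in-process Plugin (bypassing that discovery entirely; denyExec and its policy are invented for illustration):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/docker/docker/pkg/authorization"
)

// denyExec is a toy authorization.Plugin that blocks exec requests.
type denyExec struct{}

func (denyExec) Name() string { return "deny-exec" }

func (denyExec) AuthZRequest(req *authorization.Request) (*authorization.Response, error) {
	if strings.Contains(req.RequestURI, "/exec") {
		return &authorization.Response{Allow: false, Msg: "exec is disabled"}, nil
	}
	return &authorization.Response{Allow: true}, nil
}

func (denyExec) AuthZResponse(req *authorization.Request) (*authorization.Response, error) {
	return &authorization.Response{Allow: true}, nil // never filters responses
}

func main() {
	var p authorization.Plugin = denyExec{}
	res, _ := p.AuthZRequest(&authorization.Request{RequestURI: "/v1.23/containers/abc/exec"})
	fmt.Println(res.Allow, res.Msg) // false exec is disabled
}
```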
diff --git a/vendor/github.com/docker/docker/pkg/authorization/response.go b/vendor/github.com/docker/docker/pkg/authorization/response.go new file mode 100644 index 00000000..245a0ef7 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/authorization/response.go @@ -0,0 +1,203 @@ +package authorization + +import ( + "bufio" + "bytes" + "encoding/json" + "fmt" + "net" + "net/http" + + "github.com/Sirupsen/logrus" +) + +// ResponseModifier allows authorization plugins to read and modify the content of the http.response +type ResponseModifier interface { + http.ResponseWriter + http.Flusher + http.CloseNotifier + + // RawBody returns the current http content + RawBody() []byte + + // RawHeaders returns the current content of the http headers + RawHeaders() ([]byte, error) + + // StatusCode returns the current status code + StatusCode() int + + // OverrideBody replaces the body of the HTTP reply + OverrideBody(b []byte) + + // OverrideHeader replaces the headers of the HTTP reply + OverrideHeader(b []byte) error + + // OverrideStatusCode replaces the status code of the HTTP reply + OverrideStatusCode(statusCode int) + + // FlushAll flushes all data to the HTTP response + FlushAll() error + + // Hijacked indicates the response has been hijacked by the Docker daemon + Hijacked() bool +} + +// NewResponseModifier creates a wrapper to an http.ResponseWriter to allow inspecting and modifying the content +func NewResponseModifier(rw http.ResponseWriter) ResponseModifier { + return &responseModifier{rw: rw, header: make(http.Header)} +} + +// responseModifier is used as an adapter to http.ResponseWriter in order to manipulate and explore +// the http request/response from docker daemon +type responseModifier struct { + // The original response writer + rw http.ResponseWriter + + r *http.Request + + status int + // body holds the response body + body []byte + // header holds the response header + header http.Header + // statusCode holds the response status code + statusCode int + // hijacked indicates the request has been hijacked + hijacked bool +} + +func (rm *responseModifier) Hijacked() bool { + return rm.hijacked +} + +// WriteHeader stores the http status code +func (rm *responseModifier) WriteHeader(s int) { + + // Use original request if hijacked + if rm.hijacked { + rm.rw.WriteHeader(s) + return + } + + rm.statusCode = s +} + +// Header returns the internal http header +func (rm *responseModifier) Header() http.Header { + + // Use original header if hijacked + if rm.hijacked { + return rm.rw.Header() + } + + return rm.header +} + +// StatusCode returns the stored status code +func (rm *responseModifier) StatusCode() int { + return rm.statusCode +} + +// OverrideBody replaces the body of the HTTP reply +func (rm *responseModifier) OverrideBody(b []byte) { + rm.body = b +} + +func (rm *responseModifier) OverrideStatusCode(statusCode int) { + rm.statusCode = statusCode +} + +// OverrideHeader replaces the headers of the HTTP reply +func (rm *responseModifier) OverrideHeader(b []byte) error { + header := http.Header{} + if err := json.Unmarshal(b, &header); err != nil { + return err + } + rm.header = header + return nil +} + +// Write stores the byte array inside content +func (rm *responseModifier) Write(b []byte) (int, error) { + + if rm.hijacked { + return rm.rw.Write(b) + } + + rm.body = append(rm.body, b...) + return len(b), nil +}
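The point of this adapter is that nothing reaches the wrapped writer until FlushAll (defined just below) is called, which gives plugins a window to rewrite the reply. A hedged usage sketch against httptest's recorder, assuming only the exported ResponseModifier API shown in this file:

```go
package main

import (
	"fmt"
	"net/http/httptest"

	"github.com/docker/docker/pkg/authorization"
)

func main() {
	rec := httptest.NewRecorder()
	rm := authorization.NewResponseModifier(rec)

	// Writes are buffered by the modifier, not sent to the client yet.
	rm.WriteHeader(200)
	rm.Write([]byte("secret body"))

	// A plugin could rewrite the reply before it is flushed.
	rm.OverrideBody([]byte("filtered body"))
	rm.OverrideStatusCode(206)

	rm.FlushAll() // only now does the wrapped writer see anything
	fmt.Println(rec.Code, rec.Body.String()) // 206 filtered body
}
```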
+ +// RawBody returns the response body +func (rm *responseModifier) RawBody() []byte { + return rm.body +} + +func (rm *responseModifier) RawHeaders() ([]byte, error) { + var b bytes.Buffer + if err := rm.header.Write(&b); err != nil { + return nil, err + } + return b.Bytes(), nil +} + +// Hijack returns the internal connection of the wrapped http.ResponseWriter +func (rm *responseModifier) Hijack() (net.Conn, *bufio.ReadWriter, error) { + + rm.hijacked = true + rm.FlushAll() + + hijacker, ok := rm.rw.(http.Hijacker) + if !ok { + return nil, nil, fmt.Errorf("Internal response writer doesn't support the Hijacker interface") + } + return hijacker.Hijack() +} + +// CloseNotify uses the internal close notify API of the wrapped http.ResponseWriter +func (rm *responseModifier) CloseNotify() <-chan bool { + closeNotifier, ok := rm.rw.(http.CloseNotifier) + if !ok { + logrus.Errorf("Internal response writer doesn't support the CloseNotifier interface") + return nil + } + return closeNotifier.CloseNotify() +} + +// Flush uses the internal flush API of the wrapped http.ResponseWriter +func (rm *responseModifier) Flush() { + flusher, ok := rm.rw.(http.Flusher) + if !ok { + logrus.Errorf("Internal response writer doesn't support the Flusher interface") + return + } + + rm.FlushAll() + flusher.Flush() +} + +// FlushAll flushes all data to the HTTP response +func (rm *responseModifier) FlushAll() error { + // Copy the header first: headers added to the underlying writer after + // WriteHeader has been called on it would otherwise be dropped + for k, vv := range rm.header { + for _, v := range vv { + rm.rw.Header().Add(k, v) + } + } + + // Copy the status code + if rm.statusCode > 0 { + rm.rw.WriteHeader(rm.statusCode) + } + + var err error + if len(rm.body) > 0 { + // Write body + _, err = rm.rw.Write(rm.body) + } + + // Clean previous data + rm.body = nil + rm.statusCode = 0 + rm.header = http.Header{} + return err +} diff --git a/vendor/github.com/docker/docker/pkg/broadcaster/unbuffered.go b/vendor/github.com/docker/docker/pkg/broadcaster/unbuffered.go new file mode 100644 index 00000000..784d65d6 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/broadcaster/unbuffered.go @@ -0,0 +1,49 @@ +package broadcaster + +import ( + "io" + "sync" +) + +// Unbuffered holds multiple io.WriteClosers and fans each write out to all of +// them, without buffering. +type Unbuffered struct { + mu sync.Mutex + writers []io.WriteCloser +} + +// Add adds new io.WriteCloser. +func (w *Unbuffered) Add(writer io.WriteCloser) { + w.mu.Lock() + w.writers = append(w.writers, writer) + w.mu.Unlock() +} + +// Write writes bytes to all writers. Failed writers will be evicted during +// this call. +func (w *Unbuffered) Write(p []byte) (n int, err error) { + w.mu.Lock() + var evict []int + for i, sw := range w.writers { + if n, err := sw.Write(p); err != nil || n != len(p) { + // On error, evict the writer + evict = append(evict, i) + } + } + for n, i := range evict { + w.writers = append(w.writers[:i-n], w.writers[i-n+1:]...) + } + w.mu.Unlock() + return len(p), nil +}
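Unbuffered is a fan-out sink: one Write goes to every attached writer, and writers that error or short-write are silently dropped. A hedged usage sketch with two temporary files as sinks (os.CreateTemp requires Go 1.16+; error handling elided for brevity):

```go
package main

import (
	"fmt"
	"os"

	"github.com/docker/docker/pkg/broadcaster"
)

func main() {
	w := &broadcaster.Unbuffered{} // zero value is ready to use
	f1, _ := os.CreateTemp("", "sink1")
	f2, _ := os.CreateTemp("", "sink2")
	w.Add(f1)
	w.Add(f2)

	// One Write fans out to every attached writer.
	fmt.Fprintln(w, "hello from both sinks")

	w.Clean() // closes and drops all writers
}
```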
+ +// Clean closes and removes all writers. +func (w *Unbuffered) Clean() error { + w.mu.Lock() + for _, sw := range w.writers { + sw.Close() + } + w.writers = nil + w.mu.Unlock() + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/filenotify/filenotify.go b/vendor/github.com/docker/docker/pkg/filenotify/filenotify.go new file mode 100644 index 00000000..23befae6 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/filenotify/filenotify.go @@ -0,0 +1,40 @@ +// Package filenotify provides a mechanism for watching file(s) for changes. +// Generally leans on fsnotify, but provides a poll-based notifier which fsnotify does not support. +// These are wrapped up in a common interface so that either can be used interchangeably in your code. +package filenotify + +import "gopkg.in/fsnotify.v1" + +// FileWatcher is an interface for implementing file notification watchers +type FileWatcher interface { + Events() <-chan fsnotify.Event + Errors() <-chan error + Add(name string) error + Remove(name string) error + Close() error +} + +// New tries to use an fs-event watcher, and falls back to the poller if there is an error +func New() (FileWatcher, error) { + if watcher, err := NewEventWatcher(); err == nil { + return watcher, nil + } + return NewPollingWatcher(), nil +} + +// NewPollingWatcher returns a poll-based file watcher +func NewPollingWatcher() FileWatcher { + return &filePoller{ + events: make(chan fsnotify.Event), + errors: make(chan error), + } +} + +// NewEventWatcher returns an fs-event based file watcher +func NewEventWatcher() (FileWatcher, error) { + watcher, err := fsnotify.NewWatcher() + if err != nil { + return nil, err + } + return &fsNotifyWatcher{watcher}, nil +} diff --git a/vendor/github.com/docker/docker/pkg/filenotify/fsnotify.go b/vendor/github.com/docker/docker/pkg/filenotify/fsnotify.go new file mode 100644 index 00000000..42038835 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/filenotify/fsnotify.go @@ -0,0 +1,18 @@ +package filenotify + +import "gopkg.in/fsnotify.v1" + +// fsNotifyWatcher wraps the fsnotify package to satisfy the FileWatcher interface +type fsNotifyWatcher struct { + *fsnotify.Watcher +} + +// Events returns the fsnotify event channel receiver +func (w *fsNotifyWatcher) Events() <-chan fsnotify.Event { + return w.Watcher.Events +} + +// Errors returns the fsnotify error channel receiver +func (w *fsNotifyWatcher) Errors() <-chan error { + return w.Watcher.Errors +} diff --git a/vendor/github.com/docker/docker/pkg/filenotify/poller.go b/vendor/github.com/docker/docker/pkg/filenotify/poller.go new file mode 100644 index 00000000..52610853 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/filenotify/poller.go @@ -0,0 +1,204 @@ +package filenotify + +import ( + "errors" + "fmt" + "os" + "sync" + "time" + + "github.com/Sirupsen/logrus" + + "gopkg.in/fsnotify.v1" +) + +var ( + // errPollerClosed is returned when the poller is closed + errPollerClosed = errors.New("poller is closed") + // errNoSuchWatch is returned when trying to remove a watch that doesn't exist + errNoSuchWatch = errors.New("poller does not exist") +) + +// watchWaitTime is the time to wait between file poll loops +const watchWaitTime = 200 * time.Millisecond
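Because the event-based and polling watchers share the FileWatcher interface, callers can use New() and never care which one they got. A hedged usage sketch; the watched path is a placeholder and must already exist for the polling fallback, whose Add opens and stats the file:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/pkg/filenotify"
)

func main() {
	w, err := filenotify.New() // fs-event watcher, or poller fallback
	if err != nil {
		panic(err)
	}
	defer w.Close()

	if err := w.Add("/tmp/watched.log"); err != nil {
		panic(err)
	}

	// Both implementations expose the same channels.
	select {
	case ev := <-w.Events():
		fmt.Println("event:", ev.Op, ev.Name)
	case err := <-w.Errors():
		fmt.Println("error:", err)
	}
}
```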
+// filePoller is used to poll files for changes, especially in cases where fsnotify +// can't be run (e.g. when inotify handles are exhausted) +// filePoller satisfies the FileWatcher interface +type filePoller struct { + // watches is the list of files currently being polled; close the associated channel to stop the watch + watches map[string]chan struct{} + // events is the channel to listen to for watch events + events chan fsnotify.Event + // errors is the channel to listen to for watch errors + errors chan error + // mu locks the poller for modification + mu sync.Mutex + // closed is used to specify when the poller has already closed + closed bool +} + +// Add adds a filename to the list of watches +// once added, the file is polled for changes in a separate goroutine +func (w *filePoller) Add(name string) error { + w.mu.Lock() + defer w.mu.Unlock() + + if w.closed { + return errPollerClosed + } + + f, err := os.Open(name) + if err != nil { + return err + } + fi, err := os.Stat(name) + if err != nil { + return err + } + + if w.watches == nil { + w.watches = make(map[string]chan struct{}) + } + if _, exists := w.watches[name]; exists { + return fmt.Errorf("watch exists") + } + chClose := make(chan struct{}) + w.watches[name] = chClose + + go w.watch(f, fi, chClose) + return nil +} + +// Remove stops and removes watch with the specified name +func (w *filePoller) Remove(name string) error { + w.mu.Lock() + defer w.mu.Unlock() + return w.remove(name) +} + +func (w *filePoller) remove(name string) error { + if w.closed { + return errPollerClosed + } + + chClose, exists := w.watches[name] + if !exists { + return errNoSuchWatch + } + close(chClose) + delete(w.watches, name) + return nil +} + +// Events returns the event channel +// This is used for notifications on events about watched files +func (w *filePoller) Events() <-chan fsnotify.Event { + return w.events +} + +// Errors returns the errors channel +// This is used for notifications about errors on watched files +func (w *filePoller) Errors() <-chan error { + return w.errors +} + +// Close closes the poller +// All watches are stopped and removed, and no new watches can be added +func (w *filePoller) Close() error { + w.mu.Lock() + defer w.mu.Unlock() + + if w.closed { + return nil + } + + w.closed = true + for name := range w.watches { + w.remove(name) + delete(w.watches, name) + } + return nil +} + +// sendEvent publishes the specified event to the events channel +func (w *filePoller) sendEvent(e fsnotify.Event, chClose <-chan struct{}) error { + select { + case w.events <- e: + case <-chClose: + return fmt.Errorf("closed") + } + return nil +} + +// sendErr publishes the specified error to the errors channel +func (w *filePoller) sendErr(e error, chClose <-chan struct{}) error { + select { + case w.errors <- e: + case <-chClose: + return fmt.Errorf("closed") + } + return nil +} + +// watch is responsible for polling the specified file for changes +// upon finding changes to a file or errors, sendEvent/sendErr is called +func (w *filePoller) watch(f *os.File, lastFi os.FileInfo, chClose chan struct{}) { + defer f.Close() + for { + time.Sleep(watchWaitTime) + select { + case <-chClose: + logrus.Debugf("watch for %s closed", f.Name()) + return + default: + } + + fi, err := os.Stat(f.Name()) + if err != nil { + // if we got an error here and lastFi is not set, we can presume that nothing has changed + // This should be safe since before `watch()` is called, a stat is performed; if there is any error, `watch` is not called + if lastFi == nil { + continue + } + // If it doesn't exist at this point, it must have been removed
+ // no need to send the error here since this is a valid operation + if os.IsNotExist(err) { + if err := w.sendEvent(fsnotify.Event{Op: fsnotify.Remove, Name: f.Name()}, chClose); err != nil { + return + } + lastFi = nil + continue + } + // at this point, send the error + if err := w.sendErr(err, chClose); err != nil { + return + } + continue + } + + if lastFi == nil { + if err := w.sendEvent(fsnotify.Event{Op: fsnotify.Create, Name: fi.Name()}, chClose); err != nil { + return + } + lastFi = fi + continue + } + + if fi.Mode() != lastFi.Mode() { + if err := w.sendEvent(fsnotify.Event{Op: fsnotify.Chmod, Name: fi.Name()}, chClose); err != nil { + return + } + lastFi = fi + continue + } + + if fi.ModTime() != lastFi.ModTime() || fi.Size() != lastFi.Size() { + if err := w.sendEvent(fsnotify.Event{Op: fsnotify.Write, Name: fi.Name()}, chClose); err != nil { + return + } + lastFi = fi + continue + } + } +} diff --git a/vendor/github.com/docker/docker/pkg/locker/README.md b/vendor/github.com/docker/docker/pkg/locker/README.md new file mode 100644 index 00000000..e84a815c --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/locker/README.md @@ -0,0 +1,65 @@ +Locker +===== + +locker provides a mechanism for creating finer-grained locking to help +free up more global locks to handle other tasks. + +The implementation looks close to a sync.Mutex; however, the user must provide a +reference to use to refer to the underlying lock when locking and unlocking, +and unlock may generate an error. + +If a lock with a given name does not exist when `Lock` is called, one is +created. +Lock references are automatically cleaned up on `Unlock` if nothing else is +waiting for the lock. + + +## Usage + +```go +package important + +import ( + "sync" + "time" + + "github.com/docker/docker/pkg/locker" +) + +type important struct { + locks *locker.Locker + data map[string]interface{} + mu sync.Mutex +} + +func (i *important) Get(name string) interface{} { + i.locks.Lock(name) + defer i.locks.Unlock(name) + return i.data[name] +} + +func (i *important) Create(name string, data interface{}) { + i.locks.Lock(name) + defer i.locks.Unlock(name) + + i.createImportant(data) + + i.mu.Lock() + i.data[name] = data + i.mu.Unlock() +} + +func (i *important) createImportant(data interface{}) { + time.Sleep(10 * time.Second) +} +``` + +For functions dealing with a given name, always lock at the beginning of the +function (or before doing anything with the underlying state); this ensures any +other function that is dealing with the same name will block. + +When needing to modify the underlying data, use the global lock to ensure nothing +else is modifying it at the same time. +Since the name lock is already in place, no reads will occur while the modification +is being performed. + diff --git a/vendor/github.com/docker/docker/pkg/locker/locker.go b/vendor/github.com/docker/docker/pkg/locker/locker.go new file mode 100644 index 00000000..0b22ddfa --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/locker/locker.go @@ -0,0 +1,112 @@ +/* +Package locker provides a mechanism for creating finer-grained locking to help +free up more global locks to handle other tasks. + +The implementation looks close to a sync.Mutex; however, the user must provide a +reference to use to refer to the underlying lock when locking and unlocking, +and unlock may generate an error. + +If a lock with a given name does not exist when `Lock` is called, one is +created. +Lock references are automatically cleaned up on `Unlock` if nothing else is +waiting for the lock.
+*/ +package locker + +import ( + "errors" + "sync" + "sync/atomic" +) + +// ErrNoSuchLock is returned when the requested lock does not exist +var ErrNoSuchLock = errors.New("no such lock") + +// Locker provides a locking mechanism based on the passed in reference name +type Locker struct { + mu sync.Mutex + locks map[string]*lockCtr +} + +// lockCtr is used by Locker to represent a lock with a given name. +type lockCtr struct { + mu sync.Mutex + // waiters is the number of waiters waiting to acquire the lock + // this is int32 instead of uint32 so we can add `-1` in `dec()` + waiters int32 +} + +// inc increments the number of waiters waiting for the lock +func (l *lockCtr) inc() { + atomic.AddInt32(&l.waiters, 1) +} + +// dec decrements the number of waiters waiting on the lock +func (l *lockCtr) dec() { + atomic.AddInt32(&l.waiters, -1) +} + +// count gets the current number of waiters +func (l *lockCtr) count() int32 { + return atomic.LoadInt32(&l.waiters) +} + +// Lock locks the mutex +func (l *lockCtr) Lock() { + l.mu.Lock() +} + +// Unlock unlocks the mutex +func (l *lockCtr) Unlock() { + l.mu.Unlock() +} + +// New creates a new Locker +func New() *Locker { + return &Locker{ + locks: make(map[string]*lockCtr), + } +} + +// Lock locks a mutex with the given name. If it doesn't exist, one is created +func (l *Locker) Lock(name string) { + l.mu.Lock() + if l.locks == nil { + l.locks = make(map[string]*lockCtr) + } + + nameLock, exists := l.locks[name] + if !exists { + nameLock = &lockCtr{} + l.locks[name] = nameLock + } + + // increment the nameLock waiters while inside the main mutex + // this makes sure that the lock isn't deleted if `Lock` and `Unlock` are called concurrently + nameLock.inc() + l.mu.Unlock() + + // Lock the nameLock outside the main mutex so we don't block other operations + // once locked then we can decrement the number of waiters for this lock + nameLock.Lock() + nameLock.dec() +} + +// Unlock unlocks the mutex with the given name +// If the given lock is not being waited on by any other callers, it is deleted +func (l *Locker) Unlock(name string) error { + l.mu.Lock() + nameLock, exists := l.locks[name] + if !exists { + l.mu.Unlock() + return ErrNoSuchLock + } + + if nameLock.count() == 0 { + delete(l.locks, name) + } + nameLock.Unlock() + + l.mu.Unlock() + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/namesgenerator/names-generator.go b/vendor/github.com/docker/docker/pkg/namesgenerator/names-generator.go new file mode 100644 index 00000000..1764fc28 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/namesgenerator/names-generator.go @@ -0,0 +1,524 @@ +package namesgenerator + +import ( + "fmt" + + "github.com/docker/docker/pkg/random" +) + +var ( + left = [...]string{ + "admiring", + "adoring", + "agitated", + "amazing", + "angry", + "awesome", + "backstabbing", + "berserk", + "big", + "boring", + "clever", + "cocky", + "compassionate", + "condescending", + "cranky", + "desperate", + "determined", + "distracted", + "dreamy", + "drunk", + "ecstatic", + "elated", + "elegant", + "evil", + "fervent", + "focused", + "furious", + "gigantic", + "gloomy", + "goofy", + "grave", + "happy", + "high", + "hopeful", + "hungry", + "insane", + "jolly", + "jovial", + "kickass", + "lonely", + "loving", + "mad", + "modest", + "naughty", + "nauseous", + "nostalgic", + "pedantic", + "pensive", + "prickly", + "reverent", + "romantic", + "sad", + "serene", + "sharp", + "sick", + "silly", + "sleepy", + "small", + "stoic", + "stupefied", + "suspicious", + 
"tender", + "thirsty", + "tiny", + "trusting", + } + + // Docker, starting from 0.7.x, generates names from notable scientists and hackers. + // Please, for any amazing man that you add to the list, consider adding an equally amazing woman to it, and vice versa. + right = [...]string{ + // Muhammad ibn Jābir al-Ḥarrānī al-Battānī was a founding father of astronomy. https://en.wikipedia.org/wiki/Mu%E1%B8%A5ammad_ibn_J%C4%81bir_al-%E1%B8%A4arr%C4%81n%C4%AB_al-Batt%C4%81n%C4%AB + "albattani", + + // Frances E. Allen, became the first female IBM Fellow in 1989. In 2006, she became the first female recipient of the ACM's Turing Award. https://en.wikipedia.org/wiki/Frances_E._Allen + "allen", + + // June Almeida - Scottish virologist who took the first pictures of the rubella virus - https://en.wikipedia.org/wiki/June_Almeida + "almeida", + + // Archimedes was a physicist, engineer and mathematician who invented too many things to list them here. https://en.wikipedia.org/wiki/Archimedes + "archimedes", + + // Maria Ardinghelli - Italian translator, mathematician and physicist - https://en.wikipedia.org/wiki/Maria_Ardinghelli + "ardinghelli", + + // Aryabhata - Ancient Indian mathematician-astronomer during 476-550 CE https://en.wikipedia.org/wiki/Aryabhata + "aryabhata", + + // Wanda Austin - Wanda Austin is the President and CEO of The Aerospace Corporation, a leading architect for the US security space programs. https://en.wikipedia.org/wiki/Wanda_Austin + "austin", + + // Charles Babbage invented the concept of a programmable computer. https://en.wikipedia.org/wiki/Charles_Babbage. + "babbage", + + // Stefan Banach - Polish mathematician, was one of the founders of modern functional analysis. https://en.wikipedia.org/wiki/Stefan_Banach + "banach", + + // John Bardeen co-invented the transistor - https://en.wikipedia.org/wiki/John_Bardeen + "bardeen", + + // Jean Bartik, born Betty Jean Jennings, was one of the original programmers for the ENIAC computer. https://en.wikipedia.org/wiki/Jean_Bartik + "bartik", + + // Laura Bassi, the world's first female professor https://en.wikipedia.org/wiki/Laura_Bassi + "bassi", + + // Alexander Graham Bell - an eminent Scottish-born scientist, inventor, engineer and innovator who is credited with inventing the first practical telephone - https://en.wikipedia.org/wiki/Alexander_Graham_Bell + "bell", + + // Homi J Bhabha - was an Indian nuclear physicist, founding director, and professor of physics at the Tata Institute of Fundamental Research. Colloquially known as "father of Indian nuclear programme"- https://en.wikipedia.org/wiki/Homi_J._Bhabha + "bhabha", + + // Bhaskara II - Ancient Indian mathematician-astronomer whose work on calculus predates Newton and Leibniz by over half a millennium - https://en.wikipedia.org/wiki/Bh%C4%81skara_II#Calculus + "bhaskara", + + // Elizabeth Blackwell - American doctor and first American woman to receive a medical degree - https://en.wikipedia.org/wiki/Elizabeth_Blackwell + "blackwell", + + // Niels Bohr is the father of quantum theory. https://en.wikipedia.org/wiki/Niels_Bohr. + "bohr", + + // Kathleen Booth, she's credited with writing the first assembly language. https://en.wikipedia.org/wiki/Kathleen_Booth + "booth", + + // Anita Borg - Anita Borg was the founding director of the Institute for Women and Technology (IWT). https://en.wikipedia.org/wiki/Anita_Borg + "borg", + + // Satyendra Nath Bose - He provided the foundation for Bose–Einstein statistics and the theory of the Bose–Einstein condensate. 
- https://en.wikipedia.org/wiki/Satyendra_Nath_Bose + "bose", + + // Evelyn Boyd Granville - She was one of the first African-American woman to receive a Ph.D. in mathematics; she earned it in 1949 from Yale University. https://en.wikipedia.org/wiki/Evelyn_Boyd_Granville + "boyd", + + // Brahmagupta - Ancient Indian mathematician during 598-670 CE who gave rules to compute with zero - https://en.wikipedia.org/wiki/Brahmagupta#Zero + "brahmagupta", + + // Walter Houser Brattain co-invented the transistor - https://en.wikipedia.org/wiki/Walter_Houser_Brattain + "brattain", + + // Emmett Brown invented time travel. https://en.wikipedia.org/wiki/Emmett_Brown (thanks Brian Goff) + "brown", + + // Rachel Carson - American marine biologist and conservationist, her book Silent Spring and other writings are credited with advancing the global environmental movement. https://en.wikipedia.org/wiki/Rachel_Carson + "carson", + + // Subrahmanyan Chandrasekhar - Astrophysicist known for his mathematical theory on different stages and evolution in structures of the stars. He has won nobel prize for physics - https://en.wikipedia.org/wiki/Subrahmanyan_Chandrasekhar + "chandrasekhar", + + // Jane Colden - American botanist widely considered the first female American botanist - https://en.wikipedia.org/wiki/Jane_Colden + "colden", + + // Gerty Theresa Cori - American biochemist who became the third woman—and first American woman—to win a Nobel Prize in science, and the first woman to be awarded the Nobel Prize in Physiology or Medicine. Cori was born in Prague. https://en.wikipedia.org/wiki/Gerty_Cori + "cori", + + // Seymour Roger Cray was an American electrical engineer and supercomputer architect who designed a series of computers that were the fastest in the world for decades. https://en.wikipedia.org/wiki/Seymour_Cray + "cray", + + // Marie Curie discovered radioactivity. https://en.wikipedia.org/wiki/Marie_Curie. + "curie", + + // Charles Darwin established the principles of natural evolution. https://en.wikipedia.org/wiki/Charles_Darwin. + "darwin", + + // Leonardo Da Vinci invented too many things to list here. https://en.wikipedia.org/wiki/Leonardo_da_Vinci. + "davinci", + + // Edsger Wybe Dijkstra was a Dutch computer scientist and mathematical scientist. https://en.wikipedia.org/wiki/Edsger_W._Dijkstra. + "dijkstra", + + // Donna Dubinsky - played an integral role in the development of personal digital assistants (PDAs) serving as CEO of Palm, Inc. and co-founding Handspring. https://en.wikipedia.org/wiki/Donna_Dubinsky + "dubinsky", + + // Annie Easley - She was a leading member of the team which developed software for the Centaur rocket stage and one of the first African-Americans in her field. https://en.wikipedia.org/wiki/Annie_Easley + "easley", + + // Albert Einstein invented the general theory of relativity. https://en.wikipedia.org/wiki/Albert_Einstein + "einstein", + + // Gertrude Elion - American biochemist, pharmacologist and the 1988 recipient of the Nobel Prize in Medicine - https://en.wikipedia.org/wiki/Gertrude_Elion + "elion", + + // Douglas Engelbart gave the mother of all demos: https://en.wikipedia.org/wiki/Douglas_Engelbart + "engelbart", + + // Euclid invented geometry. https://en.wikipedia.org/wiki/Euclid + "euclid", + + // Leonhard Euler invented large parts of modern mathematics. https://de.wikipedia.org/wiki/Leonhard_Euler + "euler", + + // Pierre de Fermat pioneered several aspects of modern mathematics. 
https://en.wikipedia.org/wiki/Pierre_de_Fermat + "fermat", + + // Enrico Fermi invented the first nuclear reactor. https://en.wikipedia.org/wiki/Enrico_Fermi. + "fermi", + + // Richard Feynman was a key contributor to quantum mechanics and particle physics. https://en.wikipedia.org/wiki/Richard_Feynman + "feynman", + + // Benjamin Franklin is famous for his experiments in electricity and the invention of the lightning rod. + "franklin", + + // Galileo was a founding father of modern astronomy, and faced politics and obscurantism to establish scientific truth. https://en.wikipedia.org/wiki/Galileo_Galilei + "galileo", + + // William Henry "Bill" Gates III is an American business magnate, philanthropist, investor, computer programmer, and inventor. https://en.wikipedia.org/wiki/Bill_Gates + "gates", + + // Adele Goldberg, was one of the designers and developers of the Smalltalk language. https://en.wikipedia.org/wiki/Adele_Goldberg_(computer_scientist) + "goldberg", + + // Adele Goldstine, born Adele Katz, wrote the complete technical description for the first electronic digital computer, ENIAC. https://en.wikipedia.org/wiki/Adele_Goldstine + "goldstine", + + // Shafi Goldwasser is a computer scientist known for creating theoretical foundations of modern cryptography. Winner of 2012 ACM Turing Award. https://en.wikipedia.org/wiki/Shafi_Goldwasser + "goldwasser", + + // James Golick, all around gangster. + "golick", + + // Jane Goodall - British primatologist, ethologist, and anthropologist who is considered to be the world's foremost expert on chimpanzees - https://en.wikipedia.org/wiki/Jane_Goodall + "goodall", + + // Margaret Hamilton - Director of the Software Engineering Division of the MIT Instrumentation Laboratory, which developed on-board flight software for the Apollo space program. https://en.wikipedia.org/wiki/Margaret_Hamilton_(scientist) + "hamilton", + + // Stephen Hawking pioneered the field of cosmology by combining general relativity and quantum mechanics. https://en.wikipedia.org/wiki/Stephen_Hawking + "hawking", + + // Werner Heisenberg was a founding father of quantum mechanics. https://en.wikipedia.org/wiki/Werner_Heisenberg + "heisenberg", + + // Jaroslav Heyrovský was the inventor of the polarographic method, father of the electroanalytical method, and recipient of the Nobel Prize in 1959. His main field of work was polarography. https://en.wikipedia.org/wiki/Jaroslav_Heyrovsk%C3%BD + "heyrovsky", + + // Dorothy Hodgkin was a British biochemist, credited with the development of protein crystallography. She was awarded the Nobel Prize in Chemistry in 1964. https://en.wikipedia.org/wiki/Dorothy_Hodgkin + "hodgkin", + + // Erna Schneider Hoover revolutionized modern communication by inventing a computerized telephon switching method. https://en.wikipedia.org/wiki/Erna_Schneider_Hoover + "hoover", + + // Grace Hopper developed the first compiler for a computer programming language and is credited with popularizing the term "debugging" for fixing computer glitches. https://en.wikipedia.org/wiki/Grace_Hopper + "hopper", + + // Frances Hugle, she was an American scientist, engineer, and inventor who contributed to the understanding of semiconductors, integrated circuitry, and the unique electrical principles of microscopic materials. 
https://en.wikipedia.org/wiki/Frances_Hugle + "hugle", + + // Hypatia - Greek Alexandrine Neoplatonist philosopher in Egypt who was one of the earliest mothers of mathematics - https://en.wikipedia.org/wiki/Hypatia + "hypatia", + + // Yeong-Sil Jang was a Korean scientist and astronomer during the Joseon Dynasty; he invented the first metal printing press and water gauge. https://en.wikipedia.org/wiki/Jang_Yeong-sil + "jang", + + // Betty Jennings - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Jean_Bartik + "jennings", + + // Mary Lou Jepsen, was the founder and chief technology officer of One Laptop Per Child (OLPC), and the founder of Pixel Qi. https://en.wikipedia.org/wiki/Mary_Lou_Jepsen + "jepsen", + + // Irène Joliot-Curie - French scientist who was awarded the Nobel Prize for Chemistry in 1935. Daughter of Marie and Pierre Curie. https://en.wikipedia.org/wiki/Ir%C3%A8ne_Joliot-Curie + "joliot", + + // Karen Spärck Jones came up with the concept of inverse document frequency, which is used in most search engines today. https://en.wikipedia.org/wiki/Karen_Sp%C3%A4rck_Jones + "jones", + + // A. P. J. Abdul Kalam - is an Indian scientist aka Missile Man of India for his work on the development of ballistic missile and launch vehicle technology - https://en.wikipedia.org/wiki/A._P._J._Abdul_Kalam + "kalam", + + // Susan Kare, created the icons and many of the interface elements for the original Apple Macintosh in the 1980s, and was an original employee of NeXT, working as the Creative Director. https://en.wikipedia.org/wiki/Susan_Kare + "kare", + + // Mary Kenneth Keller, Sister Mary Kenneth Keller became the first American woman to earn a PhD in Computer Science in 1965. https://en.wikipedia.org/wiki/Mary_Kenneth_Keller + "keller", + + // Har Gobind Khorana - Indian-American biochemist who shared the 1968 Nobel Prize for Physiology - https://en.wikipedia.org/wiki/Har_Gobind_Khorana + "khorana", + + // Jack Kilby invented silicone integrated circuits and gave Silicon Valley its name. - https://en.wikipedia.org/wiki/Jack_Kilby + "kilby", + + // Maria Kirch - German astronomer and first woman to discover a comet - https://en.wikipedia.org/wiki/Maria_Margarethe_Kirch + "kirch", + + // Donald Knuth - American computer scientist, author of "The Art of Computer Programming" and creator of the TeX typesetting system. https://en.wikipedia.org/wiki/Donald_Knuth + "knuth", + + // Sophie Kowalevski - Russian mathematician responsible for important original contributions to analysis, differential equations and mechanics - https://en.wikipedia.org/wiki/Sofia_Kovalevskaya + "kowalevski", + + // Marie-Jeanne de Lalande - French astronomer, mathematician and cataloguer of stars - https://en.wikipedia.org/wiki/Marie-Jeanne_de_Lalande + "lalande", + + // Hedy Lamarr - Actress and inventor. The principles of her work are now incorporated into modern Wi-Fi, CDMA and Bluetooth technology. https://en.wikipedia.org/wiki/Hedy_Lamarr + "lamarr", + + // Mary Leakey - British paleoanthropologist who discovered the first fossilized Proconsul skull - https://en.wikipedia.org/wiki/Mary_Leakey + "leakey", + + // Henrietta Swan Leavitt - she was an American astronomer who discovered the relation between the luminosity and the period of Cepheid variable stars. https://en.wikipedia.org/wiki/Henrietta_Swan_Leavitt + "leavitt", + + // Ruth Lichterman - one of the original programmers of the ENIAC. 
https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Ruth_Teitelbaum + "lichterman", + + // Barbara Liskov - co-developed the Liskov substitution principle. Liskov was also the winner of the Turing Award in 2008. - https://en.wikipedia.org/wiki/Barbara_Liskov + "liskov", + + // Ada Lovelace invented the first algorithm. https://en.wikipedia.org/wiki/Ada_Lovelace (thanks James Turnbull) + "lovelace", + + // Auguste and Louis Lumière - the first filmmakers in history - https://en.wikipedia.org/wiki/Auguste_and_Louis_Lumi%C3%A8re + "lumiere", + + // Mahavira - Ancient Indian mathematician during 9th century AD who discovered basic algebraic identities - https://en.wikipedia.org/wiki/Mah%C4%81v%C4%ABra_(mathematician) + "mahavira", + + // Maria Mayer - American theoretical physicist and Nobel laureate in Physics for proposing the nuclear shell model of the atomic nucleus - https://en.wikipedia.org/wiki/Maria_Mayer + "mayer", + + // John McCarthy invented LISP: https://en.wikipedia.org/wiki/John_McCarthy_(computer_scientist) + "mccarthy", + + // Barbara McClintock - a distinguished American cytogeneticist, 1983 Nobel Laureate in Physiology or Medicine for discovering transposons. https://en.wikipedia.org/wiki/Barbara_McClintock + "mcclintock", + + // Malcolm McLean invented the modern shipping container: https://en.wikipedia.org/wiki/Malcom_McLean + "mclean", + + // Kay McNulty - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Kathleen_Antonelli + "mcnulty", + + // Lise Meitner - Austrian/Swedish physicist who was involved in the discovery of nuclear fission. The element meitnerium is named after her - https://en.wikipedia.org/wiki/Lise_Meitner + "meitner", + + // Carla Meninsky was the game designer and programmer for the Atari 2600 games Dodge 'Em and Warlords. https://en.wikipedia.org/wiki/Carla_Meninsky + "meninsky", + + // Johanna Mestorf - German prehistoric archaeologist and first female museum director in Germany - https://en.wikipedia.org/wiki/Johanna_Mestorf + "mestorf", + + // Marvin Minsky - Pioneer in Artificial Intelligence, co-founder of MIT's AI Lab, won the Turing Award in 1969. https://en.wikipedia.org/wiki/Marvin_Minsky + "minsky", + + // Maryam Mirzakhani - an Iranian mathematician and the first woman to win the Fields Medal. https://en.wikipedia.org/wiki/Maryam_Mirzakhani + "mirzakhani", + + // Samuel Morse - contributed to the invention of a single-wire telegraph system based on European telegraphs and was a co-developer of the Morse code - https://en.wikipedia.org/wiki/Samuel_Morse + "morse", + + // Ian Murdock - founder of the Debian project - https://en.wikipedia.org/wiki/Ian_Murdock + "murdock", + + // Isaac Newton invented classical mechanics and modern optics. https://en.wikipedia.org/wiki/Isaac_Newton + "newton", + + // Alfred Nobel - a Swedish chemist, engineer, innovator, and armaments manufacturer (inventor of dynamite) - https://en.wikipedia.org/wiki/Alfred_Nobel + "nobel", + + // Emmy Noether, German mathematician. Noether's Theorem is named after her. https://en.wikipedia.org/wiki/Emmy_Noether + "noether", + + // Poppy Northcutt was the first woman to work as part of NASA’s Mission Control. http://www.businessinsider.com/poppy-northcutt-helped-apollo-astronauts-2014-12?op=1 + "northcutt", + + // Robert Noyce invented silicon integrated circuits and gave Silicon Valley its name.
- https://en.wikipedia.org/wiki/Robert_Noyce + "noyce", + + // Panini - Ancient Indian linguist and grammarian from the 4th century BCE who worked on the world's first formal system - https://en.wikipedia.org/wiki/P%C4%81%E1%B9%87ini#Comparison_with_modern_formal_systems + "panini", + + // Ambroise Pare invented modern surgery. https://en.wikipedia.org/wiki/Ambroise_Par%C3%A9 + "pare", + + // Louis Pasteur discovered vaccination, fermentation and pasteurization. https://en.wikipedia.org/wiki/Louis_Pasteur. + "pasteur", + + // Cecilia Payne-Gaposchkin was an astronomer and astrophysicist who, in 1925, proposed in her Ph.D. thesis an explanation for the composition of stars in terms of the relative abundances of hydrogen and helium. https://en.wikipedia.org/wiki/Cecilia_Payne-Gaposchkin + "payne", + + // Radia Perlman is a software designer and network engineer and most famous for her invention of the spanning-tree protocol (STP). https://en.wikipedia.org/wiki/Radia_Perlman + "perlman", + + // Rob Pike was a key contributor to Unix, Plan 9, the X graphic system, utf-8, and the Go programming language. https://en.wikipedia.org/wiki/Rob_Pike + "pike", + + // Henri Poincaré made fundamental contributions in several fields of mathematics. https://en.wikipedia.org/wiki/Henri_Poincar%C3%A9 + "poincare", + + // Laura Poitras is a director and producer whose work, made possible by open source crypto tools, advances the causes of truth and freedom of information by reporting disclosures by whistleblowers such as Edward Snowden. https://en.wikipedia.org/wiki/Laura_Poitras + "poitras", + + // Claudius Ptolemy - a Greco-Egyptian writer of Alexandria, known as a mathematician, astronomer, geographer, astrologer, and poet of a single epigram in the Greek Anthology - https://en.wikipedia.org/wiki/Ptolemy + "ptolemy", + + // C. V. Raman - Indian physicist who won the Nobel Prize in 1930 for discovering the Raman effect. - https://en.wikipedia.org/wiki/C._V._Raman + "raman", + + // Srinivasa Ramanujan - Indian mathematician and autodidact who made extraordinary contributions to mathematical analysis, number theory, infinite series, and continued fractions. - https://en.wikipedia.org/wiki/Srinivasa_Ramanujan + "ramanujan", + + // Sally Kristen Ride was an American physicist and astronaut. She was the first American woman in space, and the youngest American astronaut. https://en.wikipedia.org/wiki/Sally_Ride + "ride", + + // Dennis Ritchie - co-creator of UNIX and the C programming language. - https://en.wikipedia.org/wiki/Dennis_Ritchie + "ritchie", + + // Wilhelm Conrad Röntgen - German physicist who was awarded the first Nobel Prize in Physics in 1901 for the discovery of X-rays (Röntgen rays). https://en.wikipedia.org/wiki/Wilhelm_R%C3%B6ntgen + "roentgen", + + // Rosalind Franklin - British biophysicist and X-ray crystallographer whose research was critical to the understanding of DNA - https://en.wikipedia.org/wiki/Rosalind_Franklin + "rosalind", + + // Meghnad Saha - Indian astrophysicist best known for his development of the Saha equation, used to describe chemical and physical conditions in stars - https://en.wikipedia.org/wiki/Meghnad_Saha + "saha", + + // Jean E. Sammet developed FORMAC, the first widely used computer language for symbolic manipulation of mathematical formulas. https://en.wikipedia.org/wiki/Jean_E._Sammet + "sammet", + + // Carol Shaw - Originally an Atari employee, Carol Shaw is said to be the first female video game designer.
https://en.wikipedia.org/wiki/Carol_Shaw_(video_game_designer) + "shaw", + + // Dame Stephanie "Steve" Shirley - Founded a software company in 1962 that employed women working from home. https://en.wikipedia.org/wiki/Steve_Shirley + "shirley", + + // William Shockley co-invented the transistor - https://en.wikipedia.org/wiki/William_Shockley + "shockley", + + // Françoise Barré-Sinoussi - French virologist and Nobel Prize Laureate in Physiology or Medicine; her work was fundamental in identifying HIV as the cause of AIDS. https://en.wikipedia.org/wiki/Fran%C3%A7oise_Barr%C3%A9-Sinoussi + "sinoussi", + + // Betty Snyder - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Betty_Holberton + "snyder", + + // Frances Spence - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Frances_Spence + "spence", + + // Richard Matthew Stallman - the founder of the Free Software movement, the GNU project, the Free Software Foundation, and the League for Programming Freedom. He also invented the concept of copyleft to protect the ideals of this movement, and enshrined this concept in the widely used GPL (General Public License) for software. https://en.wikiquote.org/wiki/Richard_Stallman + "stallman", + + // Michael Stonebraker is a database research pioneer and architect of Ingres, Postgres, VoltDB and SciDB. Winner of the 2014 ACM Turing Award. https://en.wikipedia.org/wiki/Michael_Stonebraker + "stonebraker", + + // Janese Swanson (with others) developed the first of the Carmen Sandiego games. She went on to found Girl Tech. https://en.wikipedia.org/wiki/Janese_Swanson + "swanson", + + // Aaron Swartz was influential in creating RSS, Markdown, Creative Commons, Reddit, and much of the internet as we know it today. He was devoted to freedom of information on the web. https://en.wikiquote.org/wiki/Aaron_Swartz + "swartz", + + // Bertha Swirles was a theoretical physicist who made a number of contributions to early quantum theory. https://en.wikipedia.org/wiki/Bertha_Swirles + "swirles", + + // Nikola Tesla invented the AC electric system and every gadget ever used by a James Bond villain. https://en.wikipedia.org/wiki/Nikola_Tesla + "tesla", + + // Ken Thompson - co-creator of UNIX and the C programming language - https://en.wikipedia.org/wiki/Ken_Thompson + "thompson", + + // Linus Torvalds invented Linux and Git. https://en.wikipedia.org/wiki/Linus_Torvalds + "torvalds", + + // Alan Turing was a founding father of computer science. https://en.wikipedia.org/wiki/Alan_Turing. + "turing", + + // Varahamihira - Ancient Indian mathematician who discovered trigonometric formulae during 505-587 CE - https://en.wikipedia.org/wiki/Var%C4%81hamihira#Contributions + "varahamihira", + + // Sir Mokshagundam Visvesvaraya - notable Indian engineer and recipient of the Indian Republic's highest honour, the Bharat Ratna, in 1955. His birthday, 15 September, is celebrated as Engineer's Day in India in his memory - https://en.wikipedia.org/wiki/Visvesvaraya + "visvesvaraya", + + // Christiane Nüsslein-Volhard - German biologist, won the Nobel Prize in Physiology or Medicine in 1995 for research on the genetic control of embryonic development. https://en.wikipedia.org/wiki/Christiane_N%C3%BCsslein-Volhard + "volhard", + + // Marlyn Wescoff - one of the original programmers of the ENIAC.
https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Marlyn_Meltzer + "wescoff", + + // Roberta Williams did pioneering work in graphical adventure games for personal computers, particularly the King's Quest series. https://en.wikipedia.org/wiki/Roberta_Williams + "williams", + + // Sophie Wilson designed the first Acorn Micro-Computer and the instruction set for ARM processors. https://en.wikipedia.org/wiki/Sophie_Wilson + "wilson", + + // Jeannette Wing - co-developed the Liskov substitution principle. - https://en.wikipedia.org/wiki/Jeannette_Wing + "wing", + + // Steve Wozniak invented the Apple I and Apple II. https://en.wikipedia.org/wiki/Steve_Wozniak + "wozniak", + + // The Wright brothers, Orville and Wilbur - credited with inventing and building the world's first successful airplane and making the first controlled, powered and sustained heavier-than-air human flight - https://en.wikipedia.org/wiki/Wright_brothers + "wright", + + // Rosalyn Sussman Yalow - American medical physicist and co-winner of the 1977 Nobel Prize in Physiology or Medicine for development of the radioimmunoassay technique. https://en.wikipedia.org/wiki/Rosalyn_Sussman_Yalow + "yalow", + + // Ada Yonath - an Israeli crystallographer, the first woman from the Middle East to win a Nobel prize in the sciences. https://en.wikipedia.org/wiki/Ada_Yonath + "yonath", + } +) + +// GetRandomName generates a random name from the list of adjectives and surnames in this package +// formatted as "adjective_surname". For example 'focused_turing'. If retry is non-zero, a random +// integer between 0 and 9 will be added to the end of the name, e.g `focused_turing3` +func GetRandomName(retry int) string { + rnd := random.Rand +begin: + name := fmt.Sprintf("%s_%s", left[rnd.Intn(len(left))], right[rnd.Intn(len(right))]) + if name == "boring_wozniak" /* Steve Wozniak is not boring */ { + goto begin + } + + if retry > 0 { + name = fmt.Sprintf("%s%d", name, rnd.Intn(10)) + } + return name +} diff --git a/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel.go b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel.go new file mode 100644 index 00000000..a21ba137 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel.go @@ -0,0 +1,100 @@ +// +build !windows + +// Package kernel provides helper functions to get, parse and compare kernel +// versions for different platforms. +package kernel + +import ( + "bytes" + "errors" + "fmt" +) + +// VersionInfo holds information about the kernel. +type VersionInfo struct { + Kernel int // Version of the kernel (e.g. 4.1.2-generic -> 4) + Major int // Major part of the kernel version (e.g. 4.1.2-generic -> 1) + Minor int // Minor part of the kernel version (e.g. 4.1.2-generic -> 2) + Flavor string // Flavor of the kernel version (e.g. 4.1.2-generic -> generic) +} + +func (k *VersionInfo) String() string { + return fmt.Sprintf("%d.%d.%d%s", k.Kernel, k.Major, k.Minor, k.Flavor) +} + +// CompareKernelVersion compares two kernel.VersionInfo structs. +// Returns -1 if a < b, 0 if a == b, 1 if a > b +func CompareKernelVersion(a, b VersionInfo) int { + if a.Kernel < b.Kernel { + return -1 + } else if a.Kernel > b.Kernel { + return 1 + } + + if a.Major < b.Major { + return -1 + } else if a.Major > b.Major { + return 1 + } + + if a.Minor < b.Minor { + return -1 + } else if a.Minor > b.Minor { + return 1 + } + + return 0 +}
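As an aside, a minimal sketch of how these kernel helpers compose from a caller outside the vendored tree; ParseRelease is defined just below, and the release string and minimum version here are illustrative only, not part of the patch:

    package main

    import (
    	"fmt"

    	"github.com/docker/docker/pkg/parsers/kernel"
    )

    func main() {
    	// Parse a release string of the form "3.12.25-gentoo" into a VersionInfo.
    	v, err := kernel.ParseRelease("3.12.25-gentoo")
    	if err != nil {
    		panic(err)
    	}
    	// Require at least kernel 3.10.0 (hypothetical minimum).
    	min := kernel.VersionInfo{Kernel: 3, Major: 10, Minor: 0}
    	if kernel.CompareKernelVersion(*v, min) < 0 {
    		fmt.Println("kernel too old:", v)
    	} else {
    		fmt.Println("kernel ok:", v) // prints "kernel ok: 3.12.25-gentoo"
    	}
    }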
+// GetKernelVersion gets the current kernel version. +func GetKernelVersion() (*VersionInfo, error) { + uts, err := uname() + if err != nil { + return nil, err + } + + release := make([]byte, len(uts.Release)) + + i := 0 + for _, c := range uts.Release { + release[i] = byte(c) + i++ + } + + // Remove the \x00 from the release for Atoi to parse correctly + release = release[:bytes.IndexByte(release, 0)] + + return ParseRelease(string(release)) +} + +// ParseRelease parses a string and creates a VersionInfo based on it. +func ParseRelease(release string) (*VersionInfo, error) { + var ( + kernel, major, minor, parsed int + flavor, partial string + ) + + // Ignore error from Sscanf to allow an empty flavor. Instead, just + // make sure we got all the version numbers. + parsed, _ = fmt.Sscanf(release, "%d.%d%s", &kernel, &major, &partial) + if parsed < 2 { + return nil, errors.New("Can't parse kernel version " + release) + } + + // sometimes we have 3.12.25-gentoo, but sometimes we just have 3.12-1-amd64 + parsed, _ = fmt.Sscanf(partial, ".%d%s", &minor, &flavor) + if parsed < 1 { + flavor = partial + } + + return &VersionInfo{ + Kernel: kernel, + Major: major, + Minor: minor, + Flavor: flavor, + }, nil +} diff --git a/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_linux.go b/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_linux.go new file mode 100644 index 00000000..7d12fcbd --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_linux.go @@ -0,0 +1,19 @@ +package kernel + +import ( + "syscall" +) + +// Utsname represents the system name structure. +// It is a passthrough for syscall.Utsname in order to make it portable to +// other platforms where it is not available. +type Utsname syscall.Utsname + +func uname() (*syscall.Utsname, error) { + uts := &syscall.Utsname{} + + if err := syscall.Uname(uts); err != nil { + return nil, err + } + return uts, nil +} diff --git a/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_unsupported.go b/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_unsupported.go new file mode 100644 index 00000000..79c66b32 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_unsupported.go @@ -0,0 +1,18 @@ +// +build !linux + +package kernel + +import ( + "errors" +) + +// Utsname represents the system name structure. +// It is defined here to make it portable as it is available on linux but not +// on windows. +type Utsname struct { + Release [65]byte +} + +func uname() (*Utsname, error) { + return nil, errors.New("Kernel version detection is available only on linux") +} diff --git a/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_freebsd.go b/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_freebsd.go new file mode 100644 index 00000000..0589cf2a --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_freebsd.go @@ -0,0 +1,18 @@ +package operatingsystem + +import ( + "errors" +) + +// GetOperatingSystem gets the name of the current operating system. +func GetOperatingSystem() (string, error) { + // TODO: Implement OS detection + return "", errors.New("Cannot detect OS version") +} + +// IsContainerized returns true if we are running inside a container. +// No-op on FreeBSD, always returns false.
+func IsContainerized() (bool, error) { + // TODO: Implement jail detection + return false, errors.New("Cannot detect if we are in a container") +} diff --git a/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_linux.go b/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_linux.go new file mode 100644 index 00000000..e04a3499 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_linux.go @@ -0,0 +1,77 @@ +// Package operatingsystem provides helper functions to get the operating system +// name for different platforms. +package operatingsystem + +import ( + "bufio" + "bytes" + "fmt" + "io/ioutil" + "os" + "strings" + + "github.com/mattn/go-shellwords" +) + +var ( + // file to use to detect if the daemon is running in a container + proc1Cgroup = "/proc/1/cgroup" + + // file to check to determine Operating System + etcOsRelease = "/etc/os-release" + + // used by stateless systems like Clear Linux + altOsRelease = "/usr/lib/os-release" +) + +// GetOperatingSystem gets the name of the current operating system. +func GetOperatingSystem() (string, error) { + osReleaseFile, err := os.Open(etcOsRelease) + if err != nil { + if !os.IsNotExist(err) { + return "", fmt.Errorf("Error opening %s: %v", etcOsRelease, err) + } + osReleaseFile, err = os.Open(altOsRelease) + if err != nil { + return "", fmt.Errorf("Error opening %s: %v", altOsRelease, err) + } + } + defer osReleaseFile.Close() + + var prettyName string + scanner := bufio.NewScanner(osReleaseFile) + for scanner.Scan() { + line := scanner.Text() + if strings.HasPrefix(line, "PRETTY_NAME=") { + data := strings.SplitN(line, "=", 2) + prettyNames, err := shellwords.Parse(data[1]) + if err != nil { + return "", fmt.Errorf("PRETTY_NAME is invalid: %s", err.Error()) + } + if len(prettyNames) != 1 { + return "", fmt.Errorf("PRETTY_NAME needs to be enclosed by quotes if it has spaces: %s", data[1]) + } + prettyName = prettyNames[0] + } + } + if prettyName != "" { + return prettyName, nil + } + // If not set, defaults to PRETTY_NAME="Linux" + // c.f. http://www.freedesktop.org/software/systemd/man/os-release.html + return "Linux", nil +} + +// IsContainerized returns true if we are running inside a container. +func IsContainerized() (bool, error) { + b, err := ioutil.ReadFile(proc1Cgroup) + if err != nil { + return false, err + } + for _, line := range bytes.Split(b, []byte{'\n'}) { + if len(line) > 0 && !bytes.HasSuffix(line, []byte{'/'}) && !bytes.HasSuffix(line, []byte("init.scope")) { + return true, nil + } + } + return false, nil +}
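For illustration, a hedged usage sketch for the operatingsystem helpers above; the caller is hypothetical and not part of the patch:

    package main

    import (
    	"fmt"

    	"github.com/docker/docker/pkg/parsers/operatingsystem"
    )

    func main() {
    	// On Linux this reads PRETTY_NAME from /etc/os-release (or /usr/lib/os-release).
    	if name, err := operatingsystem.GetOperatingSystem(); err == nil {
    		fmt.Println("OS:", name)
    	}
    	// Heuristic: PID 1's cgroup paths are non-root inside a container.
    	if inContainer, err := operatingsystem.IsContainerized(); err == nil {
    		fmt.Println("containerized:", inContainer)
    	}
    }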
diff --git a/vendor/github.com/docker/docker/pkg/parsers/parsers.go b/vendor/github.com/docker/docker/pkg/parsers/parsers.go new file mode 100644 index 00000000..acc89716 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/parsers/parsers.go @@ -0,0 +1,69 @@ +// Package parsers provides helper functions to parse and validate different types +// of strings: hosts, unix addresses, tcp addresses, filters, and kernel +// operating system versions. +package parsers + +import ( + "fmt" + "strconv" + "strings" +) + +// ParseKeyValueOpt parses and validates the specified string as a key/value pair (key=value) +func ParseKeyValueOpt(opt string) (string, string, error) { + parts := strings.SplitN(opt, "=", 2) + if len(parts) != 2 { + return "", "", fmt.Errorf("Unable to parse key/value option: %s", opt) + } + return strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]), nil +} + +// ParseUintList parses and validates the specified string as the value +// found in some cgroup file (e.g. `cpuset.cpus`, `cpuset.mems`), which could be +// one of the formats below. Note that duplicates are actually allowed in the +// input string. It returns a `map[int]bool` with available elements from `val` +// set to `true`. +// Supported formats: +// 7 +// 1-6 +// 0,3-4,7,8-10 +// 0-0,0,1-7 +// 03,1-3 <- this will be parsed as [1,2,3] +// 3,2,1 +// 0-2,3,1 +func ParseUintList(val string) (map[int]bool, error) { + if val == "" { + return map[int]bool{}, nil + } + + availableInts := make(map[int]bool) + split := strings.Split(val, ",") + errInvalidFormat := fmt.Errorf("invalid format: %s", val) + + for _, r := range split { + if !strings.Contains(r, "-") { + v, err := strconv.Atoi(r) + if err != nil { + return nil, errInvalidFormat + } + availableInts[v] = true + } else { + split := strings.SplitN(r, "-", 2) + min, err := strconv.Atoi(split[0]) + if err != nil { + return nil, errInvalidFormat + } + max, err := strconv.Atoi(split[1]) + if err != nil { + return nil, errInvalidFormat + } + if max < min { + return nil, errInvalidFormat + } + for i := min; i <= max; i++ { + availableInts[i] = true + } + } + } + return availableInts, nil +} diff --git a/vendor/github.com/docker/docker/pkg/pidfile/pidfile.go b/vendor/github.com/docker/docker/pkg/pidfile/pidfile.go new file mode 100644 index 00000000..58cc4017 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/pidfile/pidfile.go @@ -0,0 +1,50 @@ +// Package pidfile provides a structure and helper functions to create and remove +// a PID file. A PID file is usually a file used to store the process ID of a +// running process. +package pidfile + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "strings" +) + +// PIDFile is a file used to store the process ID of a running process. +type PIDFile struct { + path string +} + +func checkPIDFileAlreadyExists(path string) error { + if pidByte, err := ioutil.ReadFile(path); err == nil { + pidString := strings.TrimSpace(string(pidByte)) + if pid, err := strconv.Atoi(pidString); err == nil { + if _, err := os.Stat(filepath.Join("/proc", strconv.Itoa(pid))); err == nil { + return fmt.Errorf("pid file found, ensure docker is not running or delete %s", path) + } + } + } + return nil +} + +// New creates a PIDFile using the specified path. +func New(path string) (*PIDFile, error) { + if err := checkPIDFileAlreadyExists(path); err != nil { + return nil, err + } + if err := ioutil.WriteFile(path, []byte(fmt.Sprintf("%d", os.Getpid())), 0644); err != nil { + return nil, err + } + + return &PIDFile{path: path}, nil +}
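A minimal sketch of the pidfile lifecycle, assuming a hypothetical daemon and path; Remove is defined just below, and none of this is part of the patch:

    package main

    import "github.com/docker/docker/pkg/pidfile"

    func main() {
    	// Refuses to start if a live process already owns the PID file.
    	pf, err := pidfile.New("/var/run/mydaemon.pid") // hypothetical path
    	if err != nil {
    		panic(err)
    	}
    	defer pf.Remove()
    	// ... daemon work ...
    }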
+// Remove removes the PIDFile. +func (file PIDFile) Remove() error { + if err := os.Remove(file.path); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/platform/architecture_freebsd.go b/vendor/github.com/docker/docker/pkg/platform/architecture_freebsd.go new file mode 100644 index 00000000..992987e4 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/platform/architecture_freebsd.go @@ -0,0 +1,15 @@ +package platform + +import ( + "github.com/docker/containerd/subreaper/exec" +) + +// runtimeArchitecture gets the name of the current architecture (x86, x86_64, …) +func runtimeArchitecture() (string, error) { + cmd := exec.Command("uname", "-m") + machine, err := cmd.Output() + if err != nil { + return "", err + } + return string(machine), nil +} diff --git a/vendor/github.com/docker/docker/pkg/platform/architecture_linux.go b/vendor/github.com/docker/docker/pkg/platform/architecture_linux.go new file mode 100644 index 00000000..30c58c78 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/platform/architecture_linux.go @@ -0,0 +1,16 @@ +// Package platform provides helper functions to get the runtime architecture +// for different platforms. +package platform + +import ( + "syscall" +) + +// runtimeArchitecture gets the name of the current architecture (x86, x86_64, …) +func runtimeArchitecture() (string, error) { + utsname := &syscall.Utsname{} + if err := syscall.Uname(utsname); err != nil { + return "", err + } + return charsToString(utsname.Machine), nil +} diff --git a/vendor/github.com/docker/docker/pkg/platform/platform.go b/vendor/github.com/docker/docker/pkg/platform/platform.go new file mode 100644 index 00000000..59e25295 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/platform/platform.go @@ -0,0 +1,23 @@ +package platform + +import ( + "runtime" + + "github.com/Sirupsen/logrus" +) + +var ( + // Architecture holds the runtime architecture of the process. + Architecture string + // OSType holds the runtime operating system type (Linux, …) of the process.
+ OSType string +) + +func init() { + var err error + Architecture, err = runtimeArchitecture() + if err != nil { + logrus.Errorf("Could not read system architecture info: %v", err) + } + OSType = runtime.GOOS +} diff --git a/vendor/github.com/docker/docker/pkg/platform/utsname_int8.go b/vendor/github.com/docker/docker/pkg/platform/utsname_int8.go new file mode 100644 index 00000000..5dcbadfd --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/platform/utsname_int8.go @@ -0,0 +1,18 @@ +// +build linux,386 linux,amd64 linux,arm64 +// see golang's sources src/syscall/ztypes_linux_*.go that use int8 + +package platform + +// Convert the OS/ARCH-specific utsname.Machine to string +// given as an array of signed int8 +func charsToString(ca [65]int8) string { + s := make([]byte, len(ca)) + var lens int + for ; lens < len(ca); lens++ { + if ca[lens] == 0 { + break + } + s[lens] = uint8(ca[lens]) + } + return string(s[0:lens]) +} diff --git a/vendor/github.com/docker/docker/pkg/platform/utsname_uint8.go b/vendor/github.com/docker/docker/pkg/platform/utsname_uint8.go new file mode 100644 index 00000000..c9875cf6 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/platform/utsname_uint8.go @@ -0,0 +1,18 @@ +// +build linux,arm linux,ppc64 linux,ppc64le s390x +// see golang's sources src/syscall/ztypes_linux_*.go that use uint8 + +package platform + +// Convert the OS/ARCH-specific utsname.Machine to string +// given as an array of unsigned uint8 +func charsToString(ca [65]uint8) string { + s := make([]byte, len(ca)) + var lens int + for ; lens < len(ca); lens++ { + if ca[lens] == 0 { + break + } + s[lens] = ca[lens] + } + return string(s[0:lens]) +} diff --git a/vendor/github.com/docker/docker/pkg/pubsub/publisher.go b/vendor/github.com/docker/docker/pkg/pubsub/publisher.go new file mode 100644 index 00000000..09364617 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/pubsub/publisher.go @@ -0,0 +1,111 @@ +package pubsub + +import ( + "sync" + "time" +) + +var wgPool = sync.Pool{New: func() interface{} { return new(sync.WaitGroup) }} + +// NewPublisher creates a new pub/sub publisher to broadcast messages. +// The duration is used as the send timeout so as not to block the publisher publishing +// messages to other clients if one client is slow or unresponsive. +// The buffer is used when creating new channels for subscribers. +func NewPublisher(publishTimeout time.Duration, buffer int) *Publisher { + return &Publisher{ + buffer: buffer, + timeout: publishTimeout, + subscribers: make(map[subscriber]topicFunc), + } +} + +type subscriber chan interface{} +type topicFunc func(v interface{}) bool + +// Publisher is a basic pub/sub structure. It allows sending events and +// subscribing to them, and can be safely used from multiple goroutines. +type Publisher struct { + m sync.RWMutex + buffer int + timeout time.Duration + subscribers map[subscriber]topicFunc +} + +// Len returns the number of subscribers for the publisher +func (p *Publisher) Len() int { + p.m.RLock() + i := len(p.subscribers) + p.m.RUnlock() + return i +} + +// Subscribe adds a new subscriber to the publisher returning the channel. +func (p *Publisher) Subscribe() chan interface{} { + return p.SubscribeTopic(nil) +}
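A hedged usage sketch for this Publisher (hypothetical caller, not part of the patch); Publish and Evict are defined further below:

    package main

    import (
    	"fmt"
    	"time"

    	"github.com/docker/docker/pkg/pubsub"
    )

    func main() {
    	// 100ms send timeout, per-subscriber channel buffer of 10.
    	p := pubsub.NewPublisher(100*time.Millisecond, 10)
    	sub := p.Subscribe()
    	defer p.Evict(sub)

    	p.Publish("hello")
    	fmt.Println(<-sub) // "hello"
    }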
+// SubscribeTopic adds a new subscriber that filters messages sent by a topic. +func (p *Publisher) SubscribeTopic(topic topicFunc) chan interface{} { + ch := make(chan interface{}, p.buffer) + p.m.Lock() + p.subscribers[ch] = topic + p.m.Unlock() + return ch +} + +// Evict removes the specified subscriber from receiving any more messages. +func (p *Publisher) Evict(sub chan interface{}) { + p.m.Lock() + delete(p.subscribers, sub) + close(sub) + p.m.Unlock() +} + +// Publish sends the data in v to all subscribers currently registered with the publisher. +func (p *Publisher) Publish(v interface{}) { + p.m.RLock() + if len(p.subscribers) == 0 { + p.m.RUnlock() + return + } + + wg := wgPool.Get().(*sync.WaitGroup) + for sub, topic := range p.subscribers { + wg.Add(1) + go p.sendTopic(sub, topic, v, wg) + } + wg.Wait() + wgPool.Put(wg) + p.m.RUnlock() +} + +// Close closes the channels to all subscribers registered with the publisher. +func (p *Publisher) Close() { + p.m.Lock() + for sub := range p.subscribers { + delete(p.subscribers, sub) + close(sub) + } + p.m.Unlock() +} + +func (p *Publisher) sendTopic(sub subscriber, topic topicFunc, v interface{}, wg *sync.WaitGroup) { + defer wg.Done() + if topic != nil && !topic(v) { + return + } + + // send under a select so as not to block if the receiver is unavailable + if p.timeout > 0 { + select { + case sub <- v: + case <-time.After(p.timeout): + } + return + } + + select { + case sub <- v: + default: + } +} diff --git a/vendor/github.com/docker/docker/pkg/registrar/registrar.go b/vendor/github.com/docker/docker/pkg/registrar/registrar.go new file mode 100644 index 00000000..8910197f --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/registrar/registrar.go @@ -0,0 +1,127 @@ +// Package registrar provides name registration. It reserves a name to a given key. +package registrar + +import ( + "errors" + "sync" +) + +var ( + // ErrNameReserved is an error which is returned when a name is requested to be reserved that already is reserved + ErrNameReserved = errors.New("name is reserved") + // ErrNameNotReserved is an error which is returned when trying to find a name that is not reserved + ErrNameNotReserved = errors.New("name is not reserved") + // ErrNoSuchKey is returned when trying to find the names for a key which is not known + ErrNoSuchKey = errors.New("provided key does not exist") +)
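A usage sketch of the Registrar API defined just below (hypothetical names and keys, not part of the patch):

    package main

    import (
    	"fmt"

    	"github.com/docker/docker/pkg/registrar"
    )

    func main() {
    	reg := registrar.NewRegistrar()

    	// Reserving the same name for the same key is idempotent ...
    	_ = reg.Reserve("web", "container-1")
    	_ = reg.Reserve("web", "container-1") // nil

    	// ... but reserving it for a different key fails.
    	if err := reg.Reserve("web", "container-2"); err == registrar.ErrNameReserved {
    		fmt.Println("name already taken")
    	}

    	reg.Release("web") // the name can now be reserved again
    }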
+// Registrar stores and indexes a list of keys and their registered names, as well as each name and the key it is registered to. +// Names must be unique. +// Registrar is safe for concurrent access. +type Registrar struct { + idx map[string][]string + names map[string]string + mu sync.Mutex +} + +// NewRegistrar creates a new Registrar with an empty index +func NewRegistrar() *Registrar { + return &Registrar{ + idx: make(map[string][]string), + names: make(map[string]string), + } +} + +// Reserve registers a key to a name +// Reserve is idempotent +// Attempting to reserve a key to a name that already exists results in an `ErrNameReserved` +// A name reservation is globally unique +func (r *Registrar) Reserve(name, key string) error { + r.mu.Lock() + defer r.mu.Unlock() + + if k, exists := r.names[name]; exists { + if k != key { + return ErrNameReserved + } + return nil + } + + r.idx[key] = append(r.idx[key], name) + r.names[name] = key + return nil +} + +// Release releases the reserved name +// Once released, a name can be reserved again +func (r *Registrar) Release(name string) { + r.mu.Lock() + defer r.mu.Unlock() + + key, exists := r.names[name] + if !exists { + return + } + + for i, n := range r.idx[key] { + if n != name { + continue + } + r.idx[key] = append(r.idx[key][:i], r.idx[key][i+1:]...) + break + } + + delete(r.names, name) + + if len(r.idx[key]) == 0 { + delete(r.idx, key) + } +} + +// Delete removes all reservations for the passed in key. +// All names reserved to this key are released. +func (r *Registrar) Delete(key string) { + r.mu.Lock() + for _, name := range r.idx[key] { + delete(r.names, name) + } + delete(r.idx, key) + r.mu.Unlock() +} + +// GetNames lists all the reserved names for the given key +func (r *Registrar) GetNames(key string) ([]string, error) { + r.mu.Lock() + defer r.mu.Unlock() + + names, exists := r.idx[key] + if !exists { + return nil, ErrNoSuchKey + } + return names, nil +} + +// Get returns the key that the passed in name is reserved to +func (r *Registrar) Get(name string) (string, error) { + r.mu.Lock() + key, exists := r.names[name] + r.mu.Unlock() + + if !exists { + return "", ErrNameNotReserved + } + return key, nil +} + +// GetAll returns all registered names +func (r *Registrar) GetAll() map[string][]string { + out := make(map[string][]string) + + r.mu.Lock() + // copy index into out + for id, names := range r.idx { + out[id] = names + } + r.mu.Unlock() + return out +} diff --git a/vendor/github.com/docker/docker/pkg/stringutils/README.md b/vendor/github.com/docker/docker/pkg/stringutils/README.md new file mode 100644 index 00000000..b3e45457 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/stringutils/README.md @@ -0,0 +1 @@ +This package provides helper functions for dealing with strings diff --git a/vendor/github.com/docker/docker/pkg/stringutils/stringutils.go b/vendor/github.com/docker/docker/pkg/stringutils/stringutils.go new file mode 100644 index 00000000..41a0d2eb --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/stringutils/stringutils.go @@ -0,0 +1,87 @@ +// Package stringutils provides helper functions for dealing with strings. +package stringutils + +import ( + "bytes" + "math/rand" + "strings" + + "github.com/docker/docker/pkg/random" +) + +// GenerateRandomAlphaOnlyString generates an alphabetical random string with length n. +func GenerateRandomAlphaOnlyString(n int) string { + // draw from upper- and lower-case ASCII letters + letters := []byte("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") + b := make([]byte, n) + for i := range b { + b[i] = letters[random.Rand.Intn(len(letters))] + } + return string(b) +}
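To make the helpers in this file concrete, a small hedged sketch (not part of the patch); Truncate and ShellQuoteArguments are defined further below:

    package main

    import (
    	"fmt"

    	"github.com/docker/docker/pkg/stringutils"
    )

    func main() {
    	id := stringutils.GenerateRandomAlphaOnlyString(64)
    	fmt.Println(stringutils.Truncate(id, 12)) // first 12 characters only

    	// Quote arguments so a shell passes them through verbatim.
    	fmt.Println(stringutils.ShellQuoteArguments([]string{"echo", "it's a test"}))
    	// echo 'it'\''s a test'
    }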
+// GenerateRandomASCIIString generates a random ASCII string with length n. +func GenerateRandomASCIIString(n int) string { + chars := "abcdefghijklmnopqrstuvwxyz" + + "ABCDEFGHIJKLMNOPQRSTUVWXYZ" + + "~!@#$%^&*()-_+={}[]\\|<,>.?/\"';:` " + res := make([]byte, n) + for i := 0; i < n; i++ { + res[i] = chars[rand.Intn(len(chars))] + } + return string(res) +} + +// Truncate truncates a string to maxlen. +func Truncate(s string, maxlen int) string { + if len(s) <= maxlen { + return s + } + return s[:maxlen] +} + +// InSlice tests whether a string is contained in a slice of strings or not. +// Comparison is case-insensitive. +func InSlice(slice []string, s string) bool { + for _, ss := range slice { + if strings.ToLower(s) == strings.ToLower(ss) { + return true + } + } + return false +} + +func quote(word string, buf *bytes.Buffer) { + // Bail out early for "simple" strings + if word != "" && !strings.ContainsAny(word, "\\'\"`${[|&;<>()~*?! \t\n") { + buf.WriteString(word) + return + } + + buf.WriteString("'") + + for i := 0; i < len(word); i++ { + b := word[i] + if b == '\'' { + // Replace literal ' with a close ', a \', and an open ' + buf.WriteString("'\\''") + } else { + buf.WriteByte(b) + } + } + + buf.WriteString("'") +} + +// ShellQuoteArguments takes a list of strings and escapes them so they will be +// handled correctly when passed as arguments to a program via a shell +func ShellQuoteArguments(args []string) string { + var buf bytes.Buffer + for i, arg := range args { + if i != 0 { + buf.WriteByte(' ') + } + quote(arg, &buf) + } + return buf.String() +} diff --git a/vendor/github.com/docker/docker/pkg/sysinfo/README.md b/vendor/github.com/docker/docker/pkg/sysinfo/README.md new file mode 100644 index 00000000..c1530cef --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/sysinfo/README.md @@ -0,0 +1 @@ +SysInfo stores information about which features a kernel supports. diff --git a/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo.go b/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo.go new file mode 100644 index 00000000..cbd00999 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo.go @@ -0,0 +1,128 @@ +package sysinfo + +import "github.com/docker/docker/pkg/parsers" + +// SysInfo stores information about which features a kernel supports. +// TODO Windows: Factor out platform specific capabilities.
+type SysInfo struct { + // Whether the kernel supports AppArmor or not + AppArmor bool + // Whether the kernel supports Seccomp or not + Seccomp bool + + cgroupMemInfo + cgroupCPUInfo + cgroupBlkioInfo + cgroupCpusetInfo + cgroupPids + + // Whether IPv4 forwarding is supported or not; if it is disabled, networking will not work + IPv4ForwardingDisabled bool + + // Whether bridge-nf-call-iptables is supported or not + BridgeNFCallIPTablesDisabled bool + + // Whether bridge-nf-call-ip6tables is supported or not + BridgeNFCallIP6TablesDisabled bool + + // Whether the cgroup has the mountpoint of "devices" or not + CgroupDevicesEnabled bool +} + +type cgroupMemInfo struct { + // Whether memory limit is supported or not + MemoryLimit bool + + // Whether swap limit is supported or not + SwapLimit bool + + // Whether soft limit is supported or not + MemoryReservation bool + + // Whether OOM killer disable is supported or not + OomKillDisable bool + + // Whether memory swappiness is supported or not + MemorySwappiness bool + + // Whether kernel memory limit is supported or not + KernelMemory bool +} + +type cgroupCPUInfo struct { + // Whether CPU shares is supported or not + CPUShares bool + + // Whether CPU CFS(Completely Fair Scheduler) period is supported or not + CPUCfsPeriod bool + + // Whether CPU CFS(Completely Fair Scheduler) quota is supported or not + CPUCfsQuota bool +} + +type cgroupBlkioInfo struct { + // Whether Block IO weight is supported or not + BlkioWeight bool + + // Whether Block IO weight_device is supported or not + BlkioWeightDevice bool + + // Whether Block IO read limit in bytes per second is supported or not + BlkioReadBpsDevice bool + + // Whether Block IO write limit in bytes per second is supported or not + BlkioWriteBpsDevice bool + + // Whether Block IO read limit in IO per second is supported or not + BlkioReadIOpsDevice bool + + // Whether Block IO write limit in IO per second is supported or not + BlkioWriteIOpsDevice bool +} + +type cgroupCpusetInfo struct { + // Whether Cpuset is supported or not + Cpuset bool + + // Available Cpuset's cpus + Cpus string + + // Available Cpuset's memory nodes + Mems string +} + +type cgroupPids struct { + // Whether Pids Limit is supported or not + PidsLimit bool +} + +// IsCpusetCpusAvailable returns `true` if the provided string set is contained +// in cgroup's cpuset.cpus set, `false` otherwise. +// If error is not nil, a parsing error occurred. +func (c cgroupCpusetInfo) IsCpusetCpusAvailable(provided string) (bool, error) { + return isCpusetListAvailable(provided, c.Cpus) +}
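For context, a hedged sketch of how a caller might check cpuset availability against this struct; the SysInfo is populated by New() from sysinfo_linux.go below, and the cpu list here is illustrative, not part of the patch:

    package main

    import (
    	"fmt"

    	"github.com/docker/docker/pkg/sysinfo"
    )

    func main() {
    	info := sysinfo.New(true) // quiet: suppress warnings

    	// Is the requested cpu set (e.g. --cpuset-cpus=0-2,4) available on this host?
    	ok, err := info.IsCpusetCpusAvailable("0-2,4")
    	if err != nil {
    		panic(err) // parsing error in either list
    	}
    	fmt.Println("cpuset available:", ok)
    }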
+// IsCpusetMemsAvailable returns `true` if the provided string set is contained +// in cgroup's cpuset.mems set, `false` otherwise. +// If error is not nil, a parsing error occurred. +func (c cgroupCpusetInfo) IsCpusetMemsAvailable(provided string) (bool, error) { + return isCpusetListAvailable(provided, c.Mems) +} + +func isCpusetListAvailable(provided, available string) (bool, error) { + parsedProvided, err := parsers.ParseUintList(provided) + if err != nil { + return false, err + } + parsedAvailable, err := parsers.ParseUintList(available) + if err != nil { + return false, err + } + for k := range parsedProvided { + if !parsedAvailable[k] { + return false, nil + } + } + return true, nil +} diff --git a/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_freebsd.go b/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_freebsd.go new file mode 100644 index 00000000..22ae0d95 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_freebsd.go @@ -0,0 +1,7 @@ +package sysinfo + +// New returns an empty SysInfo for freebsd for now. +func New(quiet bool) *SysInfo { + sysInfo := &SysInfo{} + return sysInfo +} diff --git a/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_linux.go b/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_linux.go new file mode 100644 index 00000000..41fb0d2b --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_linux.go @@ -0,0 +1,246 @@ +package sysinfo + +import ( + "fmt" + "io/ioutil" + "os" + "path" + "strings" + "syscall" + + "github.com/Sirupsen/logrus" + "github.com/opencontainers/runc/libcontainer/cgroups" +) + +const ( + // SeccompModeFilter refers to the syscall argument SECCOMP_MODE_FILTER. + SeccompModeFilter = uintptr(2) +) + +func findCgroupMountpoints() (map[string]string, error) { + cgMounts, err := cgroups.GetCgroupMounts() + if err != nil { + return nil, fmt.Errorf("Failed to parse cgroup information: %v", err) + } + mps := make(map[string]string) + for _, m := range cgMounts { + for _, ss := range m.Subsystems { + mps[ss] = m.Mountpoint + } + } + return mps, nil +} + +// New returns a new SysInfo, using the filesystem to detect which features +// the kernel supports. If `quiet` is `false`, warnings are printed in logs +// whenever an error occurs or misconfigurations are present. +func New(quiet bool) *SysInfo { + sysInfo := &SysInfo{} + cgMounts, err := findCgroupMountpoints() + if err != nil { + logrus.Warnf("Failed to parse cgroup information: %v", err) + } else { + sysInfo.cgroupMemInfo = checkCgroupMem(cgMounts, quiet) + sysInfo.cgroupCPUInfo = checkCgroupCPU(cgMounts, quiet) + sysInfo.cgroupBlkioInfo = checkCgroupBlkioInfo(cgMounts, quiet) + sysInfo.cgroupCpusetInfo = checkCgroupCpusetInfo(cgMounts, quiet) + sysInfo.cgroupPids = checkCgroupPids(quiet) + } + + _, ok := cgMounts["devices"] + sysInfo.CgroupDevicesEnabled = ok + + sysInfo.IPv4ForwardingDisabled = !readProcBool("/proc/sys/net/ipv4/ip_forward") + sysInfo.BridgeNFCallIPTablesDisabled = !readProcBool("/proc/sys/net/bridge/bridge-nf-call-iptables") + sysInfo.BridgeNFCallIP6TablesDisabled = !readProcBool("/proc/sys/net/bridge/bridge-nf-call-ip6tables") + + // Check if AppArmor is supported. + if _, err := os.Stat("/sys/kernel/security/apparmor"); !os.IsNotExist(err) { + sysInfo.AppArmor = true + } + + // Check if Seccomp is supported, via CONFIG_SECCOMP. + if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_GET_SECCOMP, 0, 0); err != syscall.EINVAL { + // Make sure the kernel has CONFIG_SECCOMP_FILTER.
+ if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_SET_SECCOMP, SeccompModeFilter, 0); err != syscall.EINVAL { + sysInfo.Seccomp = true + } + } + + return sysInfo +} + +// checkCgroupMem reads the memory information from the memory cgroup mount point. +func checkCgroupMem(cgMounts map[string]string, quiet bool) cgroupMemInfo { + mountPoint, ok := cgMounts["memory"] + if !ok { + if !quiet { + logrus.Warnf("Your kernel does not support cgroup memory limit") + } + return cgroupMemInfo{} + } + + swapLimit := cgroupEnabled(mountPoint, "memory.memsw.limit_in_bytes") + if !quiet && !swapLimit { + logrus.Warn("Your kernel does not support swap memory limit.") + } + memoryReservation := cgroupEnabled(mountPoint, "memory.soft_limit_in_bytes") + if !quiet && !memoryReservation { + logrus.Warn("Your kernel does not support memory reservation.") + } + oomKillDisable := cgroupEnabled(mountPoint, "memory.oom_control") + if !quiet && !oomKillDisable { + logrus.Warnf("Your kernel does not support oom control.") + } + memorySwappiness := cgroupEnabled(mountPoint, "memory.swappiness") + if !quiet && !memorySwappiness { + logrus.Warnf("Your kernel does not support memory swappiness.") + } + kernelMemory := cgroupEnabled(mountPoint, "memory.kmem.limit_in_bytes") + if !quiet && !kernelMemory { + logrus.Warnf("Your kernel does not support kernel memory limit.") + } + + return cgroupMemInfo{ + MemoryLimit: true, + SwapLimit: swapLimit, + MemoryReservation: memoryReservation, + OomKillDisable: oomKillDisable, + MemorySwappiness: memorySwappiness, + KernelMemory: kernelMemory, + } +} + +// checkCgroupCPU reads the cpu information from the cpu cgroup mount point. +func checkCgroupCPU(cgMounts map[string]string, quiet bool) cgroupCPUInfo { + mountPoint, ok := cgMounts["cpu"] + if !ok { + if !quiet { + logrus.Warnf("Unable to find cpu cgroup in mounts") + } + return cgroupCPUInfo{} + } + + cpuShares := cgroupEnabled(mountPoint, "cpu.shares") + if !quiet && !cpuShares { + logrus.Warn("Your kernel does not support cgroup cpu shares") + } + + cpuCfsPeriod := cgroupEnabled(mountPoint, "cpu.cfs_period_us") + if !quiet && !cpuCfsPeriod { + logrus.Warn("Your kernel does not support cgroup cfs period") + } + + cpuCfsQuota := cgroupEnabled(mountPoint, "cpu.cfs_quota_us") + if !quiet && !cpuCfsQuota { + logrus.Warn("Your kernel does not support cgroup cfs quotas") + } + return cgroupCPUInfo{ + CPUShares: cpuShares, + CPUCfsPeriod: cpuCfsPeriod, + CPUCfsQuota: cpuCfsQuota, + } +} + +// checkCgroupBlkioInfo reads the blkio information from the blkio cgroup mount point. 
+func checkCgroupBlkioInfo(cgMounts map[string]string, quiet bool) cgroupBlkioInfo { + mountPoint, ok := cgMounts["blkio"] + if !ok { + if !quiet { + logrus.Warnf("Unable to find blkio cgroup in mounts") + } + return cgroupBlkioInfo{} + } + + weight := cgroupEnabled(mountPoint, "blkio.weight") + if !quiet && !weight { + logrus.Warn("Your kernel does not support cgroup blkio weight") + } + + weightDevice := cgroupEnabled(mountPoint, "blkio.weight_device") + if !quiet && !weightDevice { + logrus.Warn("Your kernel does not support cgroup blkio weight_device") + } + + readBpsDevice := cgroupEnabled(mountPoint, "blkio.throttle.read_bps_device") + if !quiet && !readBpsDevice { + logrus.Warn("Your kernel does not support cgroup blkio throttle.read_bps_device") + } + + writeBpsDevice := cgroupEnabled(mountPoint, "blkio.throttle.write_bps_device") + if !quiet && !writeBpsDevice { + logrus.Warn("Your kernel does not support cgroup blkio throttle.write_bps_device") + } + readIOpsDevice := cgroupEnabled(mountPoint, "blkio.throttle.read_iops_device") + if !quiet && !readIOpsDevice { + logrus.Warn("Your kernel does not support cgroup blkio throttle.read_iops_device") + } + + writeIOpsDevice := cgroupEnabled(mountPoint, "blkio.throttle.write_iops_device") + if !quiet && !writeIOpsDevice { + logrus.Warn("Your kernel does not support cgroup blkio throttle.write_iops_device") + } + return cgroupBlkioInfo{ + BlkioWeight: weight, + BlkioWeightDevice: weightDevice, + BlkioReadBpsDevice: readBpsDevice, + BlkioWriteBpsDevice: writeBpsDevice, + BlkioReadIOpsDevice: readIOpsDevice, + BlkioWriteIOpsDevice: writeIOpsDevice, + } +} + +// checkCgroupCpusetInfo reads the cpuset information from the cpuset cgroup mount point. +func checkCgroupCpusetInfo(cgMounts map[string]string, quiet bool) cgroupCpusetInfo { + mountPoint, ok := cgMounts["cpuset"] + if !ok { + if !quiet { + logrus.Warnf("Unable to find cpuset cgroup in mounts") + } + return cgroupCpusetInfo{} + } + + cpus, err := ioutil.ReadFile(path.Join(mountPoint, "cpuset.cpus")) + if err != nil { + return cgroupCpusetInfo{} + } + + mems, err := ioutil.ReadFile(path.Join(mountPoint, "cpuset.mems")) + if err != nil { + return cgroupCpusetInfo{} + } + + return cgroupCpusetInfo{ + Cpuset: true, + Cpus: strings.TrimSpace(string(cpus)), + Mems: strings.TrimSpace(string(mems)), + } +} + +// checkCgroupPids reads the pids information from the pids cgroup mount point. +func checkCgroupPids(quiet bool) cgroupPids { + _, err := cgroups.FindCgroupMountpoint("pids") + if err != nil { + if !quiet { + logrus.Warn(err) + } + return cgroupPids{} + } + + return cgroupPids{ + PidsLimit: true, + } +} + +func cgroupEnabled(mountPoint, name string) bool { + _, err := os.Stat(path.Join(mountPoint, name)) + return err == nil +} + +func readProcBool(path string) bool { + val, err := ioutil.ReadFile(path) + if err != nil { + return false + } + return strings.TrimSpace(string(val)) == "1" +} diff --git a/vendor/github.com/docker/docker/pkg/tailfile/tailfile.go b/vendor/github.com/docker/docker/pkg/tailfile/tailfile.go new file mode 100644 index 00000000..d580584d --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/tailfile/tailfile.go @@ -0,0 +1,66 @@ +// Package tailfile provides helper functions to read the last n lines of any +// ReadSeeker. +package tailfile + +import ( + "bytes" + "errors" + "io" + "os" +) + +const blockSize = 1024 + +var eol = []byte("\n")
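A hedged usage sketch for TailFile, which is defined just below; the log path is hypothetical and the caller is not part of the patch:

    package main

    import (
    	"fmt"
    	"os"

    	"github.com/docker/docker/pkg/tailfile"
    )

    func main() {
    	f, err := os.Open("/var/log/app.log") // hypothetical file
    	if err != nil {
    		panic(err)
    	}
    	defer f.Close()

    	lines, err := tailfile.TailFile(f, 10) // last 10 lines
    	if err != nil {
    		panic(err)
    	}
    	for _, line := range lines {
    		fmt.Println(string(line))
    	}
    }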
+// ErrNonPositiveLinesNumber is an error returned if the requested number of lines is not positive. +var ErrNonPositiveLinesNumber = errors.New("The number of lines to extract from the file must be positive") + +// TailFile returns the last n lines of reader f (which could be a file). +func TailFile(f io.ReadSeeker, n int) ([][]byte, error) { + if n <= 0 { + return nil, ErrNonPositiveLinesNumber + } + size, err := f.Seek(0, os.SEEK_END) + if err != nil { + return nil, err + } + block := -1 + var data []byte + var cnt int + for { + var b []byte + step := int64(block * blockSize) + left := size + step // how many bytes to beginning + if left < 0 { + if _, err := f.Seek(0, os.SEEK_SET); err != nil { + return nil, err + } + b = make([]byte, blockSize+left) + if _, err := f.Read(b); err != nil { + return nil, err + } + data = append(b, data...) + break + } else { + b = make([]byte, blockSize) + if _, err := f.Seek(step, os.SEEK_END); err != nil { + return nil, err + } + if _, err := f.Read(b); err != nil { + return nil, err + } + data = append(b, data...) + } + cnt += bytes.Count(b, eol) + if cnt > n { + break + } + block-- + } + lines := bytes.Split(data, eol) + if n < len(lines) { + return lines[len(lines)-n-1 : len(lines)-1], nil + } + return lines[:len(lines)-1], nil +} diff --git a/vendor/github.com/docker/docker/pkg/truncindex/truncindex.go b/vendor/github.com/docker/docker/pkg/truncindex/truncindex.go new file mode 100644 index 00000000..02610b8b --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/truncindex/truncindex.go @@ -0,0 +1,137 @@ +// Package truncindex provides a general 'index tree', used by Docker +// in order to be able to reference containers by only a few unambiguous +// characters of their id. +package truncindex + +import ( + "errors" + "fmt" + "strings" + "sync" + + "github.com/tchap/go-patricia/patricia" +) + +var ( + // ErrEmptyPrefix is an error returned if the prefix was empty. + ErrEmptyPrefix = errors.New("Prefix can't be empty") + + // ErrIllegalChar is returned when a space is in the ID + ErrIllegalChar = errors.New("illegal character: ' '") + + // ErrNotExist is returned when ID or its prefix not found in index. + ErrNotExist = errors.New("ID does not exist") +) + +// ErrAmbiguousPrefix is returned if the prefix was ambiguous +// (multiple ids for the prefix). +type ErrAmbiguousPrefix struct { + prefix string +} + +func (e ErrAmbiguousPrefix) Error() string { + return fmt.Sprintf("Multiple IDs found with provided prefix: %s", e.prefix) +} + +// TruncIndex allows the retrieval of string identifiers by any of their unique prefixes. +// This is used to retrieve image and container IDs by more convenient shorthand prefixes. +type TruncIndex struct { + sync.RWMutex + trie *patricia.Trie + ids map[string]struct{} +} + +// NewTruncIndex creates a new TruncIndex and initializes with a list of IDs. +func NewTruncIndex(ids []string) (idx *TruncIndex) { + idx = &TruncIndex{ + ids: make(map[string]struct{}), + + // Change patricia max prefix per node length, + // because our len(ID) is always 64 + trie: patricia.NewTrie(patricia.MaxPrefixPerNode(64)), + } + for _, id := range ids { + idx.addID(id) + } + return +} + +func (idx *TruncIndex) addID(id string) error { + if strings.Contains(id, " ") { + return ErrIllegalChar + } + if id == "" { + return ErrEmptyPrefix + } + if _, exists := idx.ids[id]; exists { + return fmt.Errorf("id already exists: '%s'", id) + } + idx.ids[id] = struct{}{} + if inserted := idx.trie.Insert(patricia.Prefix(id), struct{}{}); !inserted { + return fmt.Errorf("failed to insert id: %s", id) + } + return nil +}
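To illustrate prefix resolution, a hedged sketch using this TruncIndex; the IDs are made up, and Get is defined further below:

    package main

    import (
    	"fmt"

    	"github.com/docker/docker/pkg/truncindex"
    )

    func main() {
    	idx := truncindex.NewTruncIndex([]string{
    		"8d72cfc51f9a4b3c", // hypothetical container IDs
    		"8d11aa7c22e84f0d",
    	})

    	id, err := idx.Get("8d7") // unique prefix resolves to the full ID
    	fmt.Println(id, err)      // "8d72cfc51f9a4b3c" <nil>

    	_, err = idx.Get("8d") // shared prefix: returns an ErrAmbiguousPrefix
    	fmt.Println(err)
    }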
+// Add adds a new ID to the TruncIndex. +func (idx *TruncIndex) Add(id string) error { + idx.Lock() + defer idx.Unlock() + if err := idx.addID(id); err != nil { + return err + } + return nil +} + +// Delete removes an ID from the TruncIndex. If there are multiple IDs +// with the given prefix, an error is returned. +func (idx *TruncIndex) Delete(id string) error { + idx.Lock() + defer idx.Unlock() + if _, exists := idx.ids[id]; !exists || id == "" { + return fmt.Errorf("no such id: '%s'", id) + } + delete(idx.ids, id) + if deleted := idx.trie.Delete(patricia.Prefix(id)); !deleted { + return fmt.Errorf("no such id: '%s'", id) + } + return nil +} + +// Get retrieves an ID from the TruncIndex. If there are multiple IDs +// with the given prefix, an error is returned. +func (idx *TruncIndex) Get(s string) (string, error) { + if s == "" { + return "", ErrEmptyPrefix + } + var ( + id string + ) + subTreeVisitFunc := func(prefix patricia.Prefix, item patricia.Item) error { + if id != "" { + // we haven't found the ID if there are two or more IDs + id = "" + return ErrAmbiguousPrefix{prefix: string(prefix)} + } + id = string(prefix) + return nil + } + + idx.RLock() + defer idx.RUnlock() + if err := idx.trie.VisitSubtree(patricia.Prefix(s), subTreeVisitFunc); err != nil { + return "", err + } + if id != "" { + return id, nil + } + return "", ErrNotExist +} + +// Iterate iterates over all stored IDs, and passes each of them to the given handler. +func (idx *TruncIndex) Iterate(handler func(id string)) { + idx.trie.Visit(func(prefix patricia.Prefix, item patricia.Item) error { + handler(string(prefix)) + return nil + }) +} diff --git a/vendor/github.com/docker/docker/pkg/useragent/README.md b/vendor/github.com/docker/docker/pkg/useragent/README.md new file mode 100644 index 00000000..d9cb367d --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/useragent/README.md @@ -0,0 +1 @@ +This package provides helper functions to pack version information into a single User-Agent header. diff --git a/vendor/github.com/docker/docker/pkg/useragent/useragent.go b/vendor/github.com/docker/docker/pkg/useragent/useragent.go new file mode 100644 index 00000000..1137db51 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/useragent/useragent.go @@ -0,0 +1,55 @@ +// Package useragent provides helper functions to pack +// version information into a single User-Agent header. +package useragent + +import ( + "strings" +) + +// VersionInfo is used to model UserAgent versions. +type VersionInfo struct { + Name string + Version string +} + +func (vi *VersionInfo) isValid() bool { + const stopChars = " \t\r\n/" + name := vi.Name + vers := vi.Version + if len(name) == 0 || strings.ContainsAny(name, stopChars) { + return false + } + if len(vers) == 0 || strings.ContainsAny(vers, stopChars) { + return false + } + return true +} + +// AppendVersions converts versions to a string and appends the string to the string base. +// +// Each VersionInfo will be converted to a string in the format of +// "product/version", where the "product" is taken from the name field and the +// version from the version field. Several pieces of version information +// will be concatenated and separated by spaces. +// +// Example: +// AppendVersions("base", VersionInfo{"foo", "1.0"}, VersionInfo{"bar", "2.0"}) +// results in "base foo/1.0 bar/2.0".
+func AppendVersions(base string, versions ...VersionInfo) string { + if len(versions) == 0 { + return base + } + + verstrs := make([]string, 0, 1+len(versions)) + if len(base) > 0 { + verstrs = append(verstrs, base) + } + + for _, v := range versions { + if !v.isValid() { + continue + } + verstrs = append(verstrs, v.Name+"/"+v.Version) + } + return strings.Join(verstrs, " ") +} diff --git a/vendor/github.com/docker/docker/profiles/apparmor/apparmor.go b/vendor/github.com/docker/docker/profiles/apparmor/apparmor.go new file mode 100644 index 00000000..51dfa5cf --- /dev/null +++ b/vendor/github.com/docker/docker/profiles/apparmor/apparmor.go @@ -0,0 +1,115 @@ +// +build linux + +package apparmor + +import ( + "bufio" + "io" + "os" + "path" + "strings" + + "github.com/docker/docker/pkg/aaparser" + "github.com/docker/docker/utils/templates" +) + +var ( + // profileDirectory is the file store for apparmor profiles and macros. + profileDirectory = "/etc/apparmor.d" + // defaultProfilePath is the default path for the apparmor profile to be saved. + defaultProfilePath = path.Join(profileDirectory, "docker") +) + +// profileData holds information about the given profile for generation. +type profileData struct { + // Name is profile name. + Name string + // Imports defines the apparmor functions to import, before defining the profile. + Imports []string + // InnerImports defines the apparmor functions to import in the profile. + InnerImports []string + // Version is the {major, minor, patch} version of apparmor_parser as a single number. + Version int +} + +// generateDefault creates an apparmor profile from profileData. +func (p *profileData) generateDefault(out io.Writer) error { + compiled, err := templates.NewParse("apparmor_profile", baseTemplate) + if err != nil { + return err + } + + if macroExists("tunables/global") { + p.Imports = append(p.Imports, "#include ") + } else { + p.Imports = append(p.Imports, "@{PROC}=/proc/") + } + + if macroExists("abstractions/base") { + p.InnerImports = append(p.InnerImports, "#include ") + } + + ver, err := aaparser.GetVersion() + if err != nil { + return err + } + p.Version = ver + + if err := compiled.Execute(out, p); err != nil { + return err + } + return nil +} + +// macroExists checks if the passed macro exists. +func macroExists(m string) bool { + _, err := os.Stat(path.Join(profileDirectory, m)) + return err == nil +} + +// InstallDefault generates a default profile and installs it in the +// profileDirectory with `apparmor_parser`. +func InstallDefault(name string) error { + // Make sure the path where they want to save the profile exists + if err := os.MkdirAll(profileDirectory, 0755); err != nil { + return err + } + + p := profileData{ + Name: name, + } + + f, err := os.OpenFile(defaultProfilePath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) + if err != nil { + return err + } + if err := p.generateDefault(f); err != nil { + f.Close() + return err + } + f.Close() + + if err := aaparser.LoadProfile(defaultProfilePath); err != nil { + return err + } + + return nil +} + +// IsLoaded checks if a passed profile has been loaded into the kernel.
+func IsLoaded(name string) error { + file, err := os.Open("/sys/kernel/security/apparmor/profiles") + if err != nil { + return err + } + defer file.Close() + r := bufio.NewReader(file) + for { + p, err := r.ReadString('\n') + if err != nil { + // an io.EOF here means the profile was not found in the list + return err + } + if strings.HasPrefix(p, name+" ") { + return nil + } + } +} diff --git a/vendor/github.com/docker/docker/profiles/apparmor/template.go b/vendor/github.com/docker/docker/profiles/apparmor/template.go new file mode 100644 index 00000000..ada33bf0 --- /dev/null +++ b/vendor/github.com/docker/docker/profiles/apparmor/template.go @@ -0,0 +1,46 @@ +// +build linux + +package apparmor + +// baseTemplate defines the default apparmor profile for containers. +const baseTemplate = ` +{{range $value := .Imports}} +{{$value}} +{{end}} + +profile {{.Name}} flags=(attach_disconnected,mediate_deleted) { +{{range $value := .InnerImports}} + {{$value}} +{{end}} + + network, + capability, + file, + umount, + + deny @{PROC}/* w, # deny write for all files directly in /proc (not in a subdir) + # deny write to files not in /proc/<number>/** or /proc/sys/** + deny @{PROC}/{[^1-9],[^1-9][^0-9],[^1-9s][^0-9y][^0-9s],[^1-9][^0-9][^0-9][^0-9]*}/** w, + deny @{PROC}/sys/[^k]** w, # deny /proc/sys except /proc/sys/k* (effectively /proc/sys/kernel) + deny @{PROC}/sys/kernel/{?,??,[^s][^h][^m]**} w, # deny everything except shm* in /proc/sys/kernel/ + deny @{PROC}/sysrq-trigger rwklx, + deny @{PROC}/mem rwklx, + deny @{PROC}/kmem rwklx, + deny @{PROC}/kcore rwklx, + + deny mount, + + deny /sys/[^f]*/** wklx, + deny /sys/f[^s]*/** wklx, + deny /sys/fs/[^c]*/** wklx, + deny /sys/fs/c[^g]*/** wklx, + deny /sys/fs/cg[^r]*/** wklx, + deny /sys/firmware/efi/efivars/** rwklx, + deny /sys/kernel/security/** rwklx, + +{{if ge .Version 208095}} + # suppress ptrace denials when using 'docker ps' or using 'ps' inside a container + ptrace (trace,read) peer=docker-default, +{{end}} +} +` diff --git a/vendor/github.com/docker/docker/profiles/seccomp/default.json b/vendor/github.com/docker/docker/profiles/seccomp/default.json new file mode 100755 index 00000000..5c70f88a --- /dev/null +++ b/vendor/github.com/docker/docker/profiles/seccomp/default.json @@ -0,0 +1,1628 @@ +{ + "defaultAction": "SCMP_ACT_ERRNO", + "architectures": [ + "SCMP_ARCH_X86_64", + "SCMP_ARCH_X86", + "SCMP_ARCH_X32" + ], + "syscalls": [ + { + "name": "accept", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "accept4", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "access", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "alarm", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "arch_prctl", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "bind", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "brk", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "capget", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "capset", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "chdir", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "chmod", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "chown", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "chown32", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "chroot", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "clock_getres", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "clock_gettime", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "clock_nanosleep", + "action":
"SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "clone", + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 0, + "value": 2080505856, + "valueTwo": 0, + "op": "SCMP_CMP_MASKED_EQ" + } + ] + }, + { + "name": "close", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "connect", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "copy_file_range", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "creat", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "dup", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "dup2", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "dup3", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "epoll_create", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "epoll_create1", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "epoll_ctl", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "epoll_ctl_old", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "epoll_pwait", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "epoll_wait", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "epoll_wait_old", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "eventfd", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "eventfd2", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "execve", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "execveat", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "exit", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "exit_group", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "faccessat", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fadvise64", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fadvise64_64", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fallocate", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fanotify_init", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fanotify_mark", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fchdir", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fchmod", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fchmodat", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fchown", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fchown32", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fchownat", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fcntl", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fcntl64", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fdatasync", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fgetxattr", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "flistxattr", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "flock", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fork", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fremovexattr", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fsetxattr", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fstat", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fstat64", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fstatat64", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fstatfs", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + 
"name": "fstatfs64", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "fsync", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "ftruncate", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "ftruncate64", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "futex", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "futimesat", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getcpu", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getcwd", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getdents", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getdents64", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getegid", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getegid32", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "geteuid", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "geteuid32", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getgid", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getgid32", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getgroups", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getgroups32", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getitimer", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getpeername", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getpgid", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getpgrp", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getpid", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getppid", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getpriority", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getrandom", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getresgid", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getresgid32", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getresuid", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getresuid32", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getrlimit", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "get_robust_list", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getrusage", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getsid", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getsockname", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getsockopt", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "get_thread_area", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "gettid", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "gettimeofday", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getuid", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getuid32", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "getxattr", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "inotify_add_watch", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "inotify_init", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "inotify_init1", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "inotify_rm_watch", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "io_cancel", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "ioctl", + 
"action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "io_destroy", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "io_getevents", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "ioprio_get", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "ioprio_set", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "io_setup", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "io_submit", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "ipc", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "kill", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "lchown", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "lchown32", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "lgetxattr", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "link", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "linkat", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "listen", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "listxattr", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "llistxattr", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "_llseek", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "lremovexattr", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "lseek", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "lsetxattr", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "lstat", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "lstat64", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "madvise", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "memfd_create", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "mincore", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "mkdir", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "mkdirat", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "mknod", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "mknodat", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "mlock", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "mlock2", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "mlockall", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "mmap", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "mmap2", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "mprotect", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "mq_getsetattr", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "mq_notify", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "mq_open", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "mq_timedreceive", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "mq_timedsend", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "mq_unlink", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "mremap", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "msgctl", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "msgget", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "msgrcv", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "msgsnd", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "msync", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "munlock", + "action": 
"SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "munlockall", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "munmap", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "nanosleep", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "newfstatat", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "_newselect", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "open", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "openat", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "pause", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "personality", + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 0, + "value": 0, + "valueTwo": 0, + "op": "SCMP_CMP_EQ" + } + ] + }, + { + "name": "personality", + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 0, + "value": 8, + "valueTwo": 0, + "op": "SCMP_CMP_EQ" + } + ] + }, + { + "name": "personality", + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 0, + "value": 4294967295, + "valueTwo": 0, + "op": "SCMP_CMP_EQ" + } + ] + }, + { + "name": "pipe", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "pipe2", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "poll", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "ppoll", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "prctl", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "pread64", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "preadv", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "prlimit64", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "pselect6", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "pwrite64", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "pwritev", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "read", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "readahead", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "readlink", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "readlinkat", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "readv", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "recv", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "recvfrom", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "recvmmsg", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "recvmsg", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "remap_file_pages", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "removexattr", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "rename", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "renameat", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "renameat2", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "restart_syscall", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "rmdir", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "rt_sigaction", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "rt_sigpending", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "rt_sigprocmask", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "rt_sigqueueinfo", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "rt_sigreturn", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "rt_sigsuspend", + "action": "SCMP_ACT_ALLOW", + "args": [] + 
}, + { + "name": "rt_sigtimedwait", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "rt_tgsigqueueinfo", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "sched_getaffinity", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "sched_getattr", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "sched_getparam", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "sched_get_priority_max", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "sched_get_priority_min", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "sched_getscheduler", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "sched_rr_get_interval", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "sched_setaffinity", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "sched_setattr", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "sched_setparam", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "sched_setscheduler", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "sched_yield", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "seccomp", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "select", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "semctl", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "semget", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "semop", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "semtimedop", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "send", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "sendfile", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "sendfile64", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "sendmmsg", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "sendmsg", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "sendto", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setdomainname", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setfsgid", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setfsgid32", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setfsuid", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setfsuid32", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setgid", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setgid32", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setgroups", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setgroups32", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "sethostname", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setitimer", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setpgid", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setpriority", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setregid", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setregid32", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setresgid", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setresgid32", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setresuid", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setresuid32", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setreuid", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": 
"setreuid32", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setrlimit", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "set_robust_list", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setsid", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setsockopt", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "set_thread_area", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "set_tid_address", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setuid", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setuid32", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "setxattr", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "shmat", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "shmctl", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "shmdt", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "shmget", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "shutdown", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "sigaltstack", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "signalfd", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "signalfd4", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "sigreturn", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "socket", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "socketcall", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "socketpair", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "splice", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "stat", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "stat64", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "statfs", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "statfs64", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "symlink", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "symlinkat", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "sync", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "sync_file_range", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "syncfs", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "sysinfo", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "syslog", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "tee", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "tgkill", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "time", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "timer_create", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "timer_delete", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "timerfd_create", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "timerfd_gettime", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "timerfd_settime", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "timer_getoverrun", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "timer_gettime", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "timer_settime", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "times", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "tkill", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "truncate", + "action": 
"SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "truncate64", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "ugetrlimit", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "umask", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "uname", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "unlink", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "unlinkat", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "utime", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "utimensat", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "utimes", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "vfork", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "vhangup", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "vmsplice", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "wait4", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "waitid", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "waitpid", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "write", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "writev", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "modify_ldt", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "breakpoint", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "cacheflush", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "set_tls", + "action": "SCMP_ACT_ALLOW", + "args": [] + } + ] +} \ No newline at end of file diff --git a/vendor/github.com/docker/docker/profiles/seccomp/generate.go b/vendor/github.com/docker/docker/profiles/seccomp/generate.go new file mode 100644 index 00000000..bf565947 --- /dev/null +++ b/vendor/github.com/docker/docker/profiles/seccomp/generate.go @@ -0,0 +1,32 @@ +// +build ignore + +package main + +import ( + "encoding/json" + "io/ioutil" + "os" + "path/filepath" + + "github.com/docker/docker/profiles/seccomp" +) + +// saves the default seccomp profile as a json file so people can use it as a +// base for their own custom profiles +func main() { + wd, err := os.Getwd() + if err != nil { + panic(err) + } + f := filepath.Join(wd, "default.json") + + // write the default profile to the file + b, err := json.MarshalIndent(seccomp.DefaultProfile, "", "\t") + if err != nil { + panic(err) + } + + if err := ioutil.WriteFile(f, b, 0644); err != nil { + panic(err) + } +} diff --git a/vendor/github.com/docker/docker/profiles/seccomp/seccomp.go b/vendor/github.com/docker/docker/profiles/seccomp/seccomp.go new file mode 100644 index 00000000..0718d840 --- /dev/null +++ b/vendor/github.com/docker/docker/profiles/seccomp/seccomp.go @@ -0,0 +1,74 @@ +// +build linux + +package seccomp + +import ( + "encoding/json" + "fmt" + + "github.com/docker/engine-api/types" + "github.com/opencontainers/specs/specs-go" +) + +//go:generate go run -tags 'seccomp' generate.go + +// GetDefaultProfile returns the default seccomp profile. +func GetDefaultProfile() (*specs.Seccomp, error) { + return setupSeccomp(DefaultProfile) +} + +// LoadProfile takes a file path and decodes the seccomp profile. 
+func LoadProfile(body string) (*specs.Seccomp, error) { + var config types.Seccomp + if err := json.Unmarshal([]byte(body), &config); err != nil { + return nil, fmt.Errorf("Decoding seccomp profile failed: %v", err) + } + + return setupSeccomp(&config) +} + +func setupSeccomp(config *types.Seccomp) (newConfig *specs.Seccomp, err error) { + if config == nil { + return nil, nil + } + + // No default action specified, no syscalls listed, assume seccomp disabled + if config.DefaultAction == "" && len(config.Syscalls) == 0 { + return nil, nil + } + + newConfig = &specs.Seccomp{} + + // if config.Architectures == 0 then libseccomp will figure out the architecture to use + if len(config.Architectures) > 0 { + for _, arch := range config.Architectures { + newConfig.Architectures = append(newConfig.Architectures, specs.Arch(arch)) + } + } + + newConfig.DefaultAction = specs.Action(config.DefaultAction) + + // Loop through all syscall blocks and convert them to libcontainer format + for _, call := range config.Syscalls { + newCall := specs.Syscall{ + Name: call.Name, + Action: specs.Action(call.Action), + } + + // Loop through all the arguments of the syscall and convert them + for _, arg := range call.Args { + newArg := specs.Arg{ + Index: arg.Index, + Value: arg.Value, + ValueTwo: arg.ValueTwo, + Op: specs.Operator(arg.Op), + } + + newCall.Args = append(newCall.Args, newArg) + } + + newConfig.Syscalls = append(newConfig.Syscalls, newCall) + } + + return newConfig, nil +} diff --git a/vendor/github.com/docker/docker/profiles/seccomp/seccomp_default.go b/vendor/github.com/docker/docker/profiles/seccomp/seccomp_default.go new file mode 100644 index 00000000..4fad7a6c --- /dev/null +++ b/vendor/github.com/docker/docker/profiles/seccomp/seccomp_default.go @@ -0,0 +1,1659 @@ +// +build linux,seccomp + +package seccomp + +import ( + "syscall" + + "github.com/docker/engine-api/types" + libseccomp "github.com/seccomp/libseccomp-golang" +) + +func arches() []types.Arch { + var native, err = libseccomp.GetNativeArch() + if err != nil { + return []types.Arch{} + } + var a = native.String() + switch a { + case "amd64": + return []types.Arch{types.ArchX86_64, types.ArchX86, types.ArchX32} + case "arm64": + return []types.Arch{types.ArchARM, types.ArchAARCH64} + case "mips64": + return []types.Arch{types.ArchMIPS, types.ArchMIPS64, types.ArchMIPS64N32} + case "mips64n32": + return []types.Arch{types.ArchMIPS, types.ArchMIPS64, types.ArchMIPS64N32} + case "mipsel64": + return []types.Arch{types.ArchMIPSEL, types.ArchMIPSEL64, types.ArchMIPSEL64N32} + case "mipsel64n32": + return []types.Arch{types.ArchMIPSEL, types.ArchMIPSEL64, types.ArchMIPSEL64N32} + default: + return []types.Arch{} + } +} + +// DefaultProfile defines the whitelist for the default seccomp profile. 
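+// +// The "clone" rule below is the only masked-argument check in the profile: with +// OpMaskedEqual, Value is the mask CLONE_NEWNS|CLONE_NEWUTS|CLONE_NEWIPC| +// CLONE_NEWUSER|CLONE_NEWPID|CLONE_NEWNET (0x7C020000, i.e. 2080505856 in +// default.json) and ValueTwo is 0, so clone(2) is allowed only when none of the +// namespace-creating flags are set.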
+var DefaultProfile = &types.Seccomp{ + DefaultAction: types.ActErrno, + Architectures: arches(), + Syscalls: []*types.Syscall{ + { + Name: "accept", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "accept4", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "access", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "alarm", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "arch_prctl", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "bind", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "brk", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "capget", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "capset", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "chdir", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "chmod", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "chown", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "chown32", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "chroot", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "clock_getres", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "clock_gettime", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "clock_nanosleep", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "clone", + Action: types.ActAllow, + Args: []*types.Arg{ + { + Index: 0, + Value: syscall.CLONE_NEWNS | syscall.CLONE_NEWUTS | syscall.CLONE_NEWIPC | syscall.CLONE_NEWUSER | syscall.CLONE_NEWPID | syscall.CLONE_NEWNET, + ValueTwo: 0, + Op: types.OpMaskedEqual, + }, + }, + }, + { + Name: "close", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "connect", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "copy_file_range", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "creat", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "dup", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "dup2", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "dup3", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "epoll_create", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "epoll_create1", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "epoll_ctl", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "epoll_ctl_old", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "epoll_pwait", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "epoll_wait", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "epoll_wait_old", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "eventfd", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "eventfd2", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "execve", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "execveat", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "exit", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "exit_group", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "faccessat", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "fadvise64", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "fadvise64_64", + Action: types.ActAllow, + Args: 
[]*types.Arg{}, + }, + { + Name: "fallocate", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "fanotify_init", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "fanotify_mark", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "fchdir", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "fchmod", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "fchmodat", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "fchown", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "fchown32", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "fchownat", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "fcntl", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "fcntl64", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "fdatasync", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "fgetxattr", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "flistxattr", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "flock", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "fork", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "fremovexattr", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "fsetxattr", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "fstat", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "fstat64", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "fstatat64", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "fstatfs", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "fstatfs64", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "fsync", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "ftruncate", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "ftruncate64", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "futex", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "futimesat", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "getcpu", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "getcwd", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "getdents", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "getdents64", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "getegid", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "getegid32", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "geteuid", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "geteuid32", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "getgid", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "getgid32", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "getgroups", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "getgroups32", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "getitimer", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "getpeername", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "getpgid", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "getpgrp", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "getpid", + Action: types.ActAllow, + Args: 
[]*types.Arg{}, + }, + { + Name: "getppid", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "getpriority", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "getrandom", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "getresgid", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "getresgid32", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "getresuid", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "getresuid32", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "getrlimit", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "get_robust_list", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "getrusage", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "getsid", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "getsockname", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "getsockopt", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "get_thread_area", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "gettid", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "gettimeofday", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "getuid", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "getuid32", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "getxattr", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "inotify_add_watch", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "inotify_init", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "inotify_init1", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "inotify_rm_watch", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "io_cancel", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "ioctl", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "io_destroy", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "io_getevents", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "ioprio_get", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "ioprio_set", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "io_setup", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "io_submit", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "ipc", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "kill", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "lchown", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "lchown32", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "lgetxattr", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "link", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "linkat", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "listen", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "listxattr", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "llistxattr", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "_llseek", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "lremovexattr", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "lseek", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + 
Name: "lsetxattr", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "lstat", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "lstat64", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "madvise", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "memfd_create", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "mincore", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "mkdir", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "mkdirat", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "mknod", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "mknodat", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "mlock", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "mlock2", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "mlockall", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "mmap", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "mmap2", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "mprotect", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "mq_getsetattr", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "mq_notify", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "mq_open", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "mq_timedreceive", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "mq_timedsend", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "mq_unlink", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "mremap", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "msgctl", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "msgget", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "msgrcv", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "msgsnd", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "msync", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "munlock", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "munlockall", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "munmap", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "nanosleep", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "newfstatat", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "_newselect", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "open", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "openat", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "pause", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "personality", + Action: types.ActAllow, + Args: []*types.Arg{ + { + Index: 0, + Value: 0x0, + Op: types.OpEqualTo, + }, + }, + }, + { + Name: "personality", + Action: types.ActAllow, + Args: []*types.Arg{ + { + Index: 0, + Value: 0x0008, + Op: types.OpEqualTo, + }, + }, + }, + { + Name: "personality", + Action: types.ActAllow, + Args: []*types.Arg{ + { + Index: 0, + Value: 0xffffffff, + Op: types.OpEqualTo, + }, + }, + }, + { + Name: "pipe", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "pipe2", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "poll", + Action: types.ActAllow, + Args: []*types.Arg{}, 
+ }, + { + Name: "ppoll", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "prctl", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "pread64", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "preadv", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "prlimit64", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "pselect6", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "pwrite64", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "pwritev", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "read", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "readahead", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "readlink", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "readlinkat", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "readv", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "recv", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "recvfrom", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "recvmmsg", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "recvmsg", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "remap_file_pages", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "removexattr", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "rename", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "renameat", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "renameat2", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "restart_syscall", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "rmdir", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "rt_sigaction", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "rt_sigpending", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "rt_sigprocmask", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "rt_sigqueueinfo", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "rt_sigreturn", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "rt_sigsuspend", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "rt_sigtimedwait", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "rt_tgsigqueueinfo", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "sched_getaffinity", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "sched_getattr", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "sched_getparam", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "sched_get_priority_max", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "sched_get_priority_min", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "sched_getscheduler", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "sched_rr_get_interval", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "sched_setaffinity", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "sched_setattr", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "sched_setparam", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "sched_setscheduler", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + 
Name: "sched_yield", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "seccomp", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "select", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "semctl", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "semget", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "semop", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "semtimedop", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "send", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "sendfile", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "sendfile64", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "sendmmsg", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "sendmsg", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "sendto", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "setdomainname", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "setfsgid", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "setfsgid32", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "setfsuid", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "setfsuid32", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "setgid", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "setgid32", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "setgroups", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "setgroups32", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "sethostname", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "setitimer", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "setpgid", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "setpriority", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "setregid", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "setregid32", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "setresgid", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "setresgid32", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "setresuid", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "setresuid32", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "setreuid", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "setreuid32", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "setrlimit", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "set_robust_list", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "setsid", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "setsockopt", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "set_thread_area", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "set_tid_address", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "setuid", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "setuid32", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "setxattr", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "shmat", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "shmctl", + Action: types.ActAllow, 
+ Args: []*types.Arg{}, + }, + { + Name: "shmdt", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "shmget", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "shutdown", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "sigaltstack", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "signalfd", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "signalfd4", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "sigreturn", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "socket", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "socketcall", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "socketpair", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "splice", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "stat", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "stat64", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "statfs", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "statfs64", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "symlink", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "symlinkat", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "sync", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "sync_file_range", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "syncfs", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "sysinfo", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "syslog", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "tee", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "tgkill", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "time", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "timer_create", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "timer_delete", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "timerfd_create", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "timerfd_gettime", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "timerfd_settime", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "timer_getoverrun", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "timer_gettime", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "timer_settime", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "times", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "tkill", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "truncate", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "truncate64", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "ugetrlimit", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "umask", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "uname", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "unlink", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "unlinkat", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "utime", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "utimensat", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "utimes", + Action: 
types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "vfork", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "vhangup", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "vmsplice", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "wait4", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "waitid", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "waitpid", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "write", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "writev", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + // i386 specific syscalls + { + Name: "modify_ldt", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + // arm specific syscalls + { + Name: "breakpoint", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "cacheflush", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Name: "set_tls", + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + }, +} diff --git a/vendor/github.com/docker/docker/profiles/seccomp/seccomp_unsupported.go b/vendor/github.com/docker/docker/profiles/seccomp/seccomp_unsupported.go new file mode 100644 index 00000000..64963292 --- /dev/null +++ b/vendor/github.com/docker/docker/profiles/seccomp/seccomp_unsupported.go @@ -0,0 +1,10 @@ +// +build linux,!seccomp + +package seccomp + +import "github.com/docker/engine-api/types" + +var ( + // DefaultProfile is a nil pointer on unsupported systems. + DefaultProfile *types.Seccomp +) diff --git a/vendor/github.com/docker/docker/restartmanager/restartmanager.go b/vendor/github.com/docker/docker/restartmanager/restartmanager.go new file mode 100644 index 00000000..0e844de2 --- /dev/null +++ b/vendor/github.com/docker/docker/restartmanager/restartmanager.go @@ -0,0 +1,131 @@ +package restartmanager + +import ( + "errors" + "fmt" + "sync" + "time" + + "github.com/docker/engine-api/types/container" +) + +const ( + backoffMultiplier = 2 + defaultTimeout = 100 * time.Millisecond +) + +// ErrRestartCanceled is returned when the restart manager has been +// canceled and will no longer restart the container. +var ErrRestartCanceled = errors.New("restart canceled") + +// RestartManager defines an object that controls container restarting rules. +type RestartManager interface { + Cancel() error + ShouldRestart(exitCode uint32, hasBeenManuallyStopped bool, executionDuration time.Duration) (bool, chan error, error) +} + +type restartManager struct { + sync.Mutex + sync.Once + policy container.RestartPolicy + failureCount int + timeout time.Duration + active bool + cancel chan struct{} + canceled bool +} + +// New returns a new restartmanager based on a policy.
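+// +// A minimal usage sketch (hypothetical caller): +// +// rm := New(container.RestartPolicy{Name: "on-failure", MaximumRetryCount: 3}) +// restart, wait, err := rm.ShouldRestart(1, false, 5*time.Second) +// if err == nil && restart { +// <-wait // observe the backoff delay (or cancellation) before restarting +// }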
+func New(policy container.RestartPolicy) RestartManager { + return &restartManager{policy: policy, cancel: make(chan struct{})} +} + +func (rm *restartManager) SetPolicy(policy container.RestartPolicy) { + rm.Lock() + rm.policy = policy + rm.Unlock() +} + +func (rm *restartManager) ShouldRestart(exitCode uint32, hasBeenManuallyStopped bool, executionDuration time.Duration) (bool, chan error, error) { + if rm.policy.IsNone() { + return false, nil, nil + } + rm.Lock() + unlockOnExit := true + defer func() { + if unlockOnExit { + rm.Unlock() + } + }() + + if rm.canceled { + return false, nil, ErrRestartCanceled + } + + if rm.active { + return false, nil, fmt.Errorf("invalid call on active restartmanager") + } + + if exitCode != 0 { + rm.failureCount++ + } else { + rm.failureCount = 0 + } + + // if the container ran for more than 10s, regardless of status and policy, reset the + // timeout back to the default. + if executionDuration.Seconds() >= 10 { + rm.timeout = 0 + } + if rm.timeout == 0 { + rm.timeout = defaultTimeout + } else { + rm.timeout *= backoffMultiplier + } + + var restart bool + switch { + case rm.policy.IsAlways(), rm.policy.IsUnlessStopped(): + restart = true + case rm.policy.IsOnFailure(): + // the default value of 0 for MaximumRetryCount means that we will not enforce a maximum count + if max := rm.policy.MaximumRetryCount; max == 0 || rm.failureCount <= max { + restart = exitCode != 0 + } + } + + if !restart { + rm.active = false + return false, nil, nil + } + + unlockOnExit = false + rm.active = true + rm.Unlock() + + ch := make(chan error) + go func() { + select { + case <-rm.cancel: + ch <- ErrRestartCanceled + close(ch) + case <-time.After(rm.timeout): + rm.Lock() + close(ch) + rm.active = false + rm.Unlock() + } + }() + + return true, ch, nil +} + +func (rm *restartManager) Cancel() error { + rm.Do(func() { + rm.Lock() + rm.canceled = true + close(rm.cancel) + rm.Unlock() + }) + return nil +} diff --git a/vendor/github.com/docker/docker/runconfig/compare.go b/vendor/github.com/docker/docker/runconfig/compare.go new file mode 100644 index 00000000..61346aab --- /dev/null +++ b/vendor/github.com/docker/docker/runconfig/compare.go @@ -0,0 +1,61 @@ +package runconfig + +import "github.com/docker/engine-api/types/container" + +// Compare compares two Config structs.
It does not compare the "Image" or "Hostname" fields. +// If OpenStdin is set on either config, they are considered different. +func Compare(a, b *container.Config) bool { + if a == nil || b == nil || + a.OpenStdin || b.OpenStdin { + return false + } + if a.AttachStdout != b.AttachStdout || + a.AttachStderr != b.AttachStderr || + a.User != b.User || + a.OpenStdin != b.OpenStdin || + a.Tty != b.Tty { + return false + } + + if len(a.Cmd) != len(b.Cmd) || + len(a.Env) != len(b.Env) || + len(a.Labels) != len(b.Labels) || + len(a.ExposedPorts) != len(b.ExposedPorts) || + len(a.Entrypoint) != len(b.Entrypoint) || + len(a.Volumes) != len(b.Volumes) { + return false + } + + for i := 0; i < len(a.Cmd); i++ { + if a.Cmd[i] != b.Cmd[i] { + return false + } + } + for i := 0; i < len(a.Env); i++ { + if a.Env[i] != b.Env[i] { + return false + } + } + for k, v := range a.Labels { + if v != b.Labels[k] { + return false + } + } + for k := range a.ExposedPorts { + if _, exists := b.ExposedPorts[k]; !exists { + return false + } + } + + for i := 0; i < len(a.Entrypoint); i++ { + if a.Entrypoint[i] != b.Entrypoint[i] { + return false + } + } + for key := range a.Volumes { + if _, exists := b.Volumes[key]; !exists { + return false + } + } + return true +} diff --git a/vendor/github.com/docker/docker/runconfig/config.go b/vendor/github.com/docker/docker/runconfig/config.go new file mode 100644 index 00000000..9f3f9c5e --- /dev/null +++ b/vendor/github.com/docker/docker/runconfig/config.go @@ -0,0 +1,71 @@ +package runconfig + +import ( + "encoding/json" + "fmt" + "io" + + "github.com/docker/docker/volume" + "github.com/docker/engine-api/types/container" + networktypes "github.com/docker/engine-api/types/network" +) + +// DecodeContainerConfig decodes a JSON-encoded config into a ContainerConfigWrapper +// struct and returns a Config, a HostConfig, and a NetworkingConfig. +// Be aware that this function does not check whether the resulting structs are nil; +// it is the caller's responsibility to do so +func DecodeContainerConfig(src io.Reader) (*container.Config, *container.HostConfig, *networktypes.NetworkingConfig, error) { + var w ContainerConfigWrapper + + decoder := json.NewDecoder(src) + if err := decoder.Decode(&w); err != nil { + return nil, nil, nil, err + } + + hc := w.getHostConfig() + + // Perform platform-specific processing of Volumes and Binds. + if w.Config != nil && hc != nil { + + // Initialize the volumes map if currently nil + if w.Config.Volumes == nil { + w.Config.Volumes = make(map[string]struct{}) + } + + // Now validate all the volumes and binds + if err := validateVolumesAndBindSettings(w.Config, hc); err != nil { + return nil, nil, nil, err + } + } + + // Certain parameters need daemon-side validation that cannot be done + // on the client, as only the daemon knows what is valid for the platform. + if err := ValidateNetMode(w.Config, hc); err != nil { + return nil, nil, nil, err + } + + // Validate isolation + if err := ValidateIsolation(hc); err != nil { + return nil, nil, nil, err + } + return w.Config, hc, w.NetworkingConfig, nil +} + +// validateVolumesAndBindSettings validates each of the volumes and bind settings +// passed by the caller to ensure they are valid. +func validateVolumesAndBindSettings(c *container.Config, hc *container.HostConfig) error { + + // Ensure all volumes and binds are valid.
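+ // Illustrative examples (not exhaustive): a volume spec is a container
+ // path such as "/data", while a bind spec adds a host path and an
+ // optional mode, e.g. "/src:/data:ro"; both forms are parsed by
+ // volume.ParseMountSpec below.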
+ for spec := range c.Volumes { + if _, err := volume.ParseMountSpec(spec, hc.VolumeDriver); err != nil { + return fmt.Errorf("Invalid volume spec %q: %v", spec, err) + } + } + for _, spec := range hc.Binds { + if _, err := volume.ParseMountSpec(spec, hc.VolumeDriver); err != nil { + return fmt.Errorf("Invalid bind mount spec %q: %v", spec, err) + } + } + + return nil +} diff --git a/vendor/github.com/docker/docker/runconfig/config_unix.go b/vendor/github.com/docker/docker/runconfig/config_unix.go new file mode 100644 index 00000000..e5902fb0 --- /dev/null +++ b/vendor/github.com/docker/docker/runconfig/config_unix.go @@ -0,0 +1,59 @@ +// +build !windows + +package runconfig + +import ( + "github.com/docker/engine-api/types/container" + networktypes "github.com/docker/engine-api/types/network" +) + +// ContainerConfigWrapper is a Config wrapper that holds the container Config (portable) +// and the corresponding HostConfig (non-portable). +type ContainerConfigWrapper struct { + *container.Config + InnerHostConfig *container.HostConfig `json:"HostConfig,omitempty"` + Cpuset string `json:",omitempty"` // Deprecated. Exported for backwards compatibility. + NetworkingConfig *networktypes.NetworkingConfig `json:"NetworkingConfig,omitempty"` + *container.HostConfig // Deprecated. Exported to read attributes from json that are not in the inner host config structure. +} + +// getHostConfig gets the HostConfig of the Config. +// It's mostly there to handle Deprecated fields of the ContainerConfigWrapper +func (w *ContainerConfigWrapper) getHostConfig() *container.HostConfig { + hc := w.HostConfig + + if hc == nil && w.InnerHostConfig != nil { + hc = w.InnerHostConfig + } else if w.InnerHostConfig != nil { + if hc.Memory != 0 && w.InnerHostConfig.Memory == 0 { + w.InnerHostConfig.Memory = hc.Memory + } + if hc.MemorySwap != 0 && w.InnerHostConfig.MemorySwap == 0 { + w.InnerHostConfig.MemorySwap = hc.MemorySwap + } + if hc.CPUShares != 0 && w.InnerHostConfig.CPUShares == 0 { + w.InnerHostConfig.CPUShares = hc.CPUShares + } + if hc.CpusetCpus != "" && w.InnerHostConfig.CpusetCpus == "" { + w.InnerHostConfig.CpusetCpus = hc.CpusetCpus + } + + if hc.VolumeDriver != "" && w.InnerHostConfig.VolumeDriver == "" { + w.InnerHostConfig.VolumeDriver = hc.VolumeDriver + } + + hc = w.InnerHostConfig + } + + if hc != nil { + if w.Cpuset != "" && hc.CpusetCpus == "" { + hc.CpusetCpus = w.Cpuset + } + } + + // Make sure NetworkMode has an acceptable value. We do this to ensure + // backwards compatible API behavior. + hc = SetDefaultNetModeIfBlank(hc) + + return hc +} diff --git a/vendor/github.com/docker/docker/runconfig/errors.go b/vendor/github.com/docker/docker/runconfig/errors.go new file mode 100644 index 00000000..d3608576 --- /dev/null +++ b/vendor/github.com/docker/docker/runconfig/errors.go @@ -0,0 +1,40 @@ +package runconfig + +import ( + "fmt" +) + +var ( + // ErrConflictContainerNetworkAndLinks conflict between --net=container and links + ErrConflictContainerNetworkAndLinks = fmt.Errorf("Conflicting options: container type network can't be used with links. This would result in undefined behavior") + // ErrConflictUserDefinedNetworkAndLinks conflict between --net= and links + ErrConflictUserDefinedNetworkAndLinks = fmt.Errorf("Conflicting options: networking can't be used with links. 
This would result in undefined behavior") + // ErrConflictSharedNetwork conflict between private and other networks + ErrConflictSharedNetwork = fmt.Errorf("Container sharing network namespace with another container or host cannot be connected to any other network") + // ErrConflictHostNetwork conflict when the container is being disconnected from, or connected to, the host network + ErrConflictHostNetwork = fmt.Errorf("Container cannot be disconnected from host network or connected to host network") + // ErrConflictNoNetwork conflict between private and other networks + ErrConflictNoNetwork = fmt.Errorf("Container cannot be connected to multiple networks with one of the networks in private (none) mode") + // ErrConflictNetworkAndDNS conflict between --dns and the network mode + ErrConflictNetworkAndDNS = fmt.Errorf("Conflicting options: dns and the network mode") + // ErrConflictNetworkHostname conflict between the hostname and the network mode + ErrConflictNetworkHostname = fmt.Errorf("Conflicting options: hostname and the network mode") + // ErrConflictHostNetworkAndLinks conflict between --net=host and links + ErrConflictHostNetworkAndLinks = fmt.Errorf("Conflicting options: host type networking can't be used with links. This would result in undefined behavior") + // ErrConflictContainerNetworkAndMac conflict between the mac address and the network mode + ErrConflictContainerNetworkAndMac = fmt.Errorf("Conflicting options: mac-address and the network mode") + // ErrConflictNetworkHosts conflict between add-host and the network mode + ErrConflictNetworkHosts = fmt.Errorf("Conflicting options: custom host-to-IP mapping and the network mode") + // ErrConflictNetworkPublishPorts conflict between the publish options and the network mode + ErrConflictNetworkPublishPorts = fmt.Errorf("Conflicting options: port publishing and the container type network mode") + // ErrConflictNetworkExposePorts conflict between the expose option and the network mode + ErrConflictNetworkExposePorts = fmt.Errorf("Conflicting options: port exposing and the container type network mode") + // ErrUnsupportedNetworkAndIP conflict between network mode and requested ip address + ErrUnsupportedNetworkAndIP = fmt.Errorf("User specified IP address is supported on user defined networks only") + // ErrUnsupportedNetworkNoSubnetAndIP conflict between network with no configured subnet and requested ip address + ErrUnsupportedNetworkNoSubnetAndIP = fmt.Errorf("User specified IP address is supported only when connecting to networks with user configured subnets") + // ErrUnsupportedNetworkAndAlias conflict between network mode and alias + ErrUnsupportedNetworkAndAlias = fmt.Errorf("Network-scoped alias is supported only for containers in user defined networks") + // ErrConflictUTSHostname conflict between the hostname and the UTS mode + ErrConflictUTSHostname = fmt.Errorf("Conflicting options: hostname and the UTS mode") +) diff --git a/vendor/github.com/docker/docker/runconfig/hostconfig.go b/vendor/github.com/docker/docker/runconfig/hostconfig.go new file mode 100644 index 00000000..769cc9f5 --- /dev/null +++ b/vendor/github.com/docker/docker/runconfig/hostconfig.go @@ -0,0 +1,35 @@ +package runconfig + +import ( + "encoding/json" + "io" + + "github.com/docker/engine-api/types/container" +) + +// DecodeHostConfig creates a HostConfig based on the specified Reader. +// It assumes the content of the reader will be JSON, and decodes it.
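+// A minimal sketch (hypothetical JSON; any io.Reader works):
+//
+//    hc, err := DecodeHostConfig(strings.NewReader(`{"HostConfig":{"NetworkMode":"host"}}`))
+//    if err == nil {
+//        fmt.Println(hc.NetworkMode) // "host"
+//    }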
+func DecodeHostConfig(src io.Reader) (*container.HostConfig, error) { + decoder := json.NewDecoder(src) + + var w ContainerConfigWrapper + if err := decoder.Decode(&w); err != nil { + return nil, err + } + + hc := w.getHostConfig() + return hc, nil +} + +// SetDefaultNetModeIfBlank changes the NetworkMode in a HostConfig structure +// to default if it is not populated. This ensures backwards compatibility after +// the validation of the network mode was moved from the docker CLI to the +// docker daemon. +func SetDefaultNetModeIfBlank(hc *container.HostConfig) *container.HostConfig { + if hc != nil { + if hc.NetworkMode == container.NetworkMode("") { + hc.NetworkMode = container.NetworkMode("default") + } + } + return hc +} diff --git a/vendor/github.com/docker/docker/runconfig/hostconfig_unix.go b/vendor/github.com/docker/docker/runconfig/hostconfig_unix.go new file mode 100644 index 00000000..efc26112 --- /dev/null +++ b/vendor/github.com/docker/docker/runconfig/hostconfig_unix.go @@ -0,0 +1,89 @@ +// +build !windows + +package runconfig + +import ( + "fmt" + "runtime" + "strings" + + "github.com/docker/engine-api/types/container" +) + +// DefaultDaemonNetworkMode returns the default network stack the daemon should +// use. +func DefaultDaemonNetworkMode() container.NetworkMode { + return container.NetworkMode("bridge") +} + +// IsPreDefinedNetwork indicates if a network is predefined by the daemon +func IsPreDefinedNetwork(network string) bool { + n := container.NetworkMode(network) + return n.IsBridge() || n.IsHost() || n.IsNone() || n.IsDefault() +} + +// ValidateNetMode ensures that the various combinations of requested +// network settings are valid. +func ValidateNetMode(c *container.Config, hc *container.HostConfig) error { + // We may not be passed a host config, such as in the case of docker commit + if hc == nil { + return nil + } + parts := strings.Split(string(hc.NetworkMode), ":") + if parts[0] == "container" { + if len(parts) < 2 || parts[1] == "" { + return fmt.Errorf("--net: invalid net mode: invalid container format container:") + } + } + + if hc.NetworkMode.IsContainer() && c.Hostname != "" { + return ErrConflictNetworkHostname + } + + if hc.UTSMode.IsHost() && c.Hostname != "" { + return ErrConflictUTSHostname + } + + if hc.NetworkMode.IsHost() && len(hc.Links) > 0 { + return ErrConflictHostNetworkAndLinks + } + + if hc.NetworkMode.IsContainer() && len(hc.Links) > 0 { + return ErrConflictContainerNetworkAndLinks + } + + if (hc.NetworkMode.IsHost() || hc.NetworkMode.IsContainer()) && len(hc.DNS) > 0 { + return ErrConflictNetworkAndDNS + } + + if (hc.NetworkMode.IsContainer() || hc.NetworkMode.IsHost()) && len(hc.ExtraHosts) > 0 { + return ErrConflictNetworkHosts + } + + if (hc.NetworkMode.IsContainer() || hc.NetworkMode.IsHost()) && c.MacAddress != "" { + return ErrConflictContainerNetworkAndMac + } + + if hc.NetworkMode.IsContainer() && (len(hc.PortBindings) > 0 || hc.PublishAllPorts == true) { + return ErrConflictNetworkPublishPorts + } + + if hc.NetworkMode.IsContainer() && len(c.ExposedPorts) > 0 { + return ErrConflictNetworkExposePorts + } + return nil +} + +// ValidateIsolation performs platform specific validation of +// isolation in the hostconfig structure. 
Linux only supports "default" +// which is LXC container isolation +func ValidateIsolation(hc *container.HostConfig) error { + // We may not be passed a host config, such as in the case of docker commit + if hc == nil { + return nil + } + if !hc.Isolation.IsValid() { + return fmt.Errorf("invalid --isolation: %q - %s only supports 'default'", hc.Isolation, runtime.GOOS) + } + return nil +} diff --git a/vendor/github.com/docker/docker/runconfig/streams.go b/vendor/github.com/docker/docker/runconfig/streams.go new file mode 100644 index 00000000..548c7826 --- /dev/null +++ b/vendor/github.com/docker/docker/runconfig/streams.go @@ -0,0 +1,109 @@ +package runconfig + +import ( + "fmt" + "io" + "io/ioutil" + "strings" + "sync" + + "github.com/docker/docker/pkg/broadcaster" + "github.com/docker/docker/pkg/ioutils" +) + +// StreamConfig holds information about I/O streams managed together. +// +// streamConfig.StdinPipe returns a WriteCloser which can be used to feed data +// to the standard input of the streamConfig's active process. +// streamConfig.StdoutPipe and streamConfig.StderrPipe each return a ReadCloser +// which can be used to retrieve the standard output (and error) generated +// by the container's active process. The output (and error) are actually +// copied and delivered to all StdoutPipe and StderrPipe consumers, using +// a kind of "broadcaster". +type StreamConfig struct { + sync.WaitGroup + stdout *broadcaster.Unbuffered + stderr *broadcaster.Unbuffered + stdin io.ReadCloser + stdinPipe io.WriteCloser +} + +// NewStreamConfig creates a stream config and initializes +// the standard err and standard out to new unbuffered broadcasters. +func NewStreamConfig() *StreamConfig { + return &StreamConfig{ + stderr: new(broadcaster.Unbuffered), + stdout: new(broadcaster.Unbuffered), + } +} + +// Stdout returns the standard output in the configuration. +func (streamConfig *StreamConfig) Stdout() *broadcaster.Unbuffered { + return streamConfig.stdout +} + +// Stderr returns the standard error in the configuration. +func (streamConfig *StreamConfig) Stderr() *broadcaster.Unbuffered { + return streamConfig.stderr +} + +// Stdin returns the standard input in the configuration. +func (streamConfig *StreamConfig) Stdin() io.ReadCloser { + return streamConfig.stdin +} + +// StdinPipe returns an input writer pipe as an io.WriteCloser. +func (streamConfig *StreamConfig) StdinPipe() io.WriteCloser { + return streamConfig.stdinPipe +} + +// StdoutPipe creates a new io.ReadCloser with an empty bytes pipe. +// It adds this new out pipe to the Stdout broadcaster. +func (streamConfig *StreamConfig) StdoutPipe() io.ReadCloser { + bytesPipe := ioutils.NewBytesPipe(nil) + streamConfig.stdout.Add(bytesPipe) + return bytesPipe +} + +// StderrPipe creates a new io.ReadCloser with an empty bytes pipe. +// It adds this new err pipe to the Stderr broadcaster. +func (streamConfig *StreamConfig) StderrPipe() io.ReadCloser { + bytesPipe := ioutils.NewBytesPipe(nil) + streamConfig.stderr.Add(bytesPipe) + return bytesPipe +} + +// NewInputPipes creates new pipes for both standard inputs, Stdin and StdinPipe. +func (streamConfig *StreamConfig) NewInputPipes() { + streamConfig.stdin, streamConfig.stdinPipe = io.Pipe() +} + +// NewNopInputPipe creates a new input pipe that will silently drop all messages in the input. +func (streamConfig *StreamConfig) NewNopInputPipe() { + streamConfig.stdinPipe = ioutils.NopWriteCloser(ioutil.Discard) +} + +// CloseStreams ensures that the configured streams are properly closed. 
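+// For context, a minimal sketch of the pipe lifecycle these streams follow
+// (hypothetical wiring; data written to Stdout() fans out to every StdoutPipe):
+//
+//    sc := NewStreamConfig()
+//    out := sc.StdoutPipe()
+//    go io.Copy(os.Stdout, out)
+//    // ... attach sc.Stdout() as the process's stdout, then eventually:
+//    _ = sc.CloseStreams()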
+func (streamConfig *StreamConfig) CloseStreams() error { + var errors []string + + if streamConfig.stdin != nil { + if err := streamConfig.stdin.Close(); err != nil { + errors = append(errors, fmt.Sprintf("error close stdin: %s", err)) + } + } + + if err := streamConfig.stdout.Clean(); err != nil { + errors = append(errors, fmt.Sprintf("error close stdout: %s", err)) + } + + if err := streamConfig.stderr.Clean(); err != nil { + errors = append(errors, fmt.Sprintf("error close stderr: %s", err)) + } + + if len(errors) > 0 { + return fmt.Errorf(strings.Join(errors, "\n")) + } + + return nil +} diff --git a/vendor/github.com/docker/docker/trash.conf b/vendor/github.com/docker/docker/trash.conf index 6dbb226e..d0387800 100644 --- a/vendor/github.com/docker/docker/trash.conf +++ b/vendor/github.com/docker/docker/trash.conf @@ -14,14 +14,14 @@ github.com/codegangsta/cli 9fec0fad02befc9209347cc6d620e68e1b45f74d github.com/coreos/etcd v2.2.0 github.com/coreos/go-systemd v4 github.com/deckarep/golang-set ef32fa3046d9f249d399f98ebaf9be944430fd1d -github.com/docker/containerd ebb6d97f443fdcbf7084e41356359c99421d93f5 https://github.com/ibuildthecloud/containerd.git +github.com/docker/containerd 8c538d6b92f2c512c3ad8a854826461ac9095b74 https://github.com/ibuildthecloud/containerd.git github.com/docker/distribution 467fc068d88aa6610691b7f1a677271a3fac4aac github.com/docker/engine-api v0.3.3 github.com/docker/go v1.5.1-1-1-gbaf439e github.com/docker/go-connections v0.2.0 github.com/docker/go-units 651fc226e7441360384da338d0fd37f2440ffbe3 github.com/docker/libkv c2aac5dbbaa5c872211edea7c0f32b3bd67e7410 -github.com/docker/libnetwork v0.7.0-rc.7 +github.com/docker/libnetwork v0.7.2-rc.1 github.com/docker/libtrust 9cbd2a1374f46905c68a4eb3694a130610adc62a github.com/docker/notary docker-v1.11-3 github.com/fluent/fluent-logger-golang v1.1.0 diff --git a/vendor/github.com/docker/docker/utils/debug.go b/vendor/github.com/docker/docker/utils/debug.go new file mode 100644 index 00000000..d2038911 --- /dev/null +++ b/vendor/github.com/docker/docker/utils/debug.go @@ -0,0 +1,26 @@ +package utils + +import ( + "os" + + "github.com/Sirupsen/logrus" +) + +// EnableDebug sets the DEBUG env var to true +// and makes the logger log at debug level. +func EnableDebug() { + os.Setenv("DEBUG", "1") + logrus.SetLevel(logrus.DebugLevel) +} + +// DisableDebug clears the DEBUG env var +// and makes the logger log at info level. +func DisableDebug() { + os.Setenv("DEBUG", "") + logrus.SetLevel(logrus.InfoLevel) +} + +// IsDebugEnabled checks whether the debug flag is set or not. +func IsDebugEnabled() bool { + return os.Getenv("DEBUG") != "" +} diff --git a/vendor/github.com/docker/docker/utils/experimental.go b/vendor/github.com/docker/docker/utils/experimental.go new file mode 100644 index 00000000..ceed0cb3 --- /dev/null +++ b/vendor/github.com/docker/docker/utils/experimental.go @@ -0,0 +1,9 @@ +// +build experimental + +package utils + +// ExperimentalBuild is a stub which always returns true for +// builds that include the "experimental" build tag +func ExperimentalBuild() bool { + return true +} diff --git a/vendor/github.com/docker/docker/utils/names.go b/vendor/github.com/docker/docker/utils/names.go new file mode 100644 index 00000000..8239c0de --- /dev/null +++ b/vendor/github.com/docker/docker/utils/names.go @@ -0,0 +1,12 @@ +package utils + +import "regexp" + +// RestrictedNameChars collects the characters allowed to represent a name, normally used to validate container and volume names.
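+// For example, "web.1" and "my_volume-2" match RestrictedNamePattern, while
+// "-cache" (invalid leading character) and "a" (the pattern requires at least
+// two characters) do not.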
+const RestrictedNameChars = `[a-zA-Z0-9][a-zA-Z0-9_.-]` + +// RestrictedNamePattern is a regular expression to validate names against the collection of restricted characters. +var RestrictedNamePattern = regexp.MustCompile(`^/?` + RestrictedNameChars + `+$`) + +// RestrictedVolumeNamePattern is a regular expression to validate volume names against the collection of restricted characters. +var RestrictedVolumeNamePattern = regexp.MustCompile(`^` + RestrictedNameChars + `+$`) diff --git a/vendor/github.com/docker/docker/utils/process_unix.go b/vendor/github.com/docker/docker/utils/process_unix.go new file mode 100644 index 00000000..bdb1b46b --- /dev/null +++ b/vendor/github.com/docker/docker/utils/process_unix.go @@ -0,0 +1,22 @@ +// +build linux freebsd + +package utils + +import ( + "syscall" +) + +// IsProcessAlive returns true if the process with the given pid is running. +func IsProcessAlive(pid int) bool { + err := syscall.Kill(pid, syscall.Signal(0)) + if err == nil || err == syscall.EPERM { + return true + } + + return false +} + +// KillProcess force-stops a process. +func KillProcess(pid int) { + syscall.Kill(pid, syscall.SIGKILL) +} diff --git a/vendor/github.com/docker/docker/utils/stubs.go b/vendor/github.com/docker/docker/utils/stubs.go new file mode 100644 index 00000000..8a496d39 --- /dev/null +++ b/vendor/github.com/docker/docker/utils/stubs.go @@ -0,0 +1,9 @@ +// +build !experimental + +package utils + +// ExperimentalBuild is a stub which always returns false for +// builds that do not include the "experimental" build tag +func ExperimentalBuild() bool { + return false +} diff --git a/vendor/github.com/docker/docker/utils/templates/templates.go b/vendor/github.com/docker/docker/utils/templates/templates.go new file mode 100644 index 00000000..749da3d5 --- /dev/null +++ b/vendor/github.com/docker/docker/utils/templates/templates.go @@ -0,0 +1,33 @@ +package templates + +import ( + "encoding/json" + "strings" + "text/template" +) + +// basicFunctions are the set of initial +// functions provided to every template. +var basicFunctions = template.FuncMap{ + "json": func(v interface{}) string { + a, _ := json.Marshal(v) + return string(a) + }, + "split": strings.Split, + "join": strings.Join, + "title": strings.Title, + "lower": strings.ToLower, + "upper": strings.ToUpper, +} + +// Parse creates a new anonymous template with the basic functions +// and parses the given format. +func Parse(format string) (*template.Template, error) { + return NewParse("", format) +} + +// NewParse creates a new tagged template with the basic functions +// and parses the given format. +func NewParse(tag, format string) (*template.Template, error) { + return template.New(tag).Funcs(basicFunctions).Parse(format) +} diff --git a/vendor/github.com/docker/docker/utils/utils.go b/vendor/github.com/docker/docker/utils/utils.go new file mode 100644 index 00000000..d3dd00ab --- /dev/null +++ b/vendor/github.com/docker/docker/utils/utils.go @@ -0,0 +1,87 @@ +package utils + +import ( + "fmt" + "io/ioutil" + "os" + "runtime" + "strings" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/stringid" +) + +var globalTestID string + +// TestDirectory creates a new temporary directory and returns its path. +// The contents of the directory at path `templateDir` are copied into the +// new directory.
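+// A minimal sketch (hypothetical template path; callers are expected to clean
+// up the returned directory themselves):
+//
+//    dir, err := TestDirectory("/tmp/template")
+//    if err == nil {
+//        defer os.RemoveAll(dir)
+//    }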
+func TestDirectory(templateDir string) (dir string, err error) { + if globalTestID == "" { + globalTestID = stringid.GenerateNonCryptoID()[:4] + } + prefix := fmt.Sprintf("docker-test%s-%s-", globalTestID, GetCallerName(2)) + if prefix == "" { + prefix = "docker-test-" + } + dir, err = ioutil.TempDir("", prefix) + if err = os.Remove(dir); err != nil { + return + } + if templateDir != "" { + if err = archive.CopyWithTar(templateDir, dir); err != nil { + return + } + } + return +} + +// GetCallerName introspects the call stack and returns the name of the +// function `depth` levels down in the stack. +func GetCallerName(depth int) string { + // Use the caller function name as a prefix. + // This helps trace temp directories back to their test. + pc, _, _, _ := runtime.Caller(depth + 1) + callerLongName := runtime.FuncForPC(pc).Name() + parts := strings.Split(callerLongName, ".") + callerShortName := parts[len(parts)-1] + return callerShortName +} + +// ReplaceOrAppendEnvValues returns the defaults with the overrides either +// replaced by env key or appended to the list +func ReplaceOrAppendEnvValues(defaults, overrides []string) []string { + cache := make(map[string]int, len(defaults)) + for i, e := range defaults { + parts := strings.SplitN(e, "=", 2) + cache[parts[0]] = i + } + + for _, value := range overrides { + // Values without '=' mean the caller wants this env to be removed/unset. + if !strings.Contains(value, "=") { + if i, exists := cache[value]; exists { + defaults[i] = "" // Used to indicate it should be removed + } + continue + } + + // Just do a normal set/update + parts := strings.SplitN(value, "=", 2) + if i, exists := cache[parts[0]]; exists { + defaults[i] = value + } else { + defaults = append(defaults, value) + } + } + + // Now remove all entries that we want to "unset" + for i := 0; i < len(defaults); i++ { + if defaults[i] == "" { + defaults = append(defaults[:i], defaults[i+1:]...)
+ i-- + } + } + + return defaults +} diff --git a/vendor/github.com/docker/docker/volume/drivers/adapter.go b/vendor/github.com/docker/docker/volume/drivers/adapter.go new file mode 100644 index 00000000..e7ca3d50 --- /dev/null +++ b/vendor/github.com/docker/docker/volume/drivers/adapter.go @@ -0,0 +1,106 @@ +package volumedrivers + +import ( + "fmt" + + "github.com/docker/docker/volume" +) + +type volumeDriverAdapter struct { + name string + proxy *volumeDriverProxy +} + +func (a *volumeDriverAdapter) Name() string { + return a.name +} + +func (a *volumeDriverAdapter) Create(name string, opts map[string]string) (volume.Volume, error) { + if err := a.proxy.Create(name, opts); err != nil { + return nil, err + } + return &volumeAdapter{ + proxy: a.proxy, + name: name, + driverName: a.name, + }, nil +} + +func (a *volumeDriverAdapter) Remove(v volume.Volume) error { + return a.proxy.Remove(v.Name()) +} + +func (a *volumeDriverAdapter) List() ([]volume.Volume, error) { + ls, err := a.proxy.List() + if err != nil { + return nil, err + } + + var out []volume.Volume + for _, vp := range ls { + out = append(out, &volumeAdapter{ + proxy: a.proxy, + name: vp.Name, + driverName: a.name, + eMount: vp.Mountpoint, + }) + } + return out, nil +} + +func (a *volumeDriverAdapter) Get(name string) (volume.Volume, error) { + v, err := a.proxy.Get(name) + if err != nil { + return nil, err + } + + // plugin may have returned no volume and no error + if v == nil { + return nil, fmt.Errorf("no such volume") + } + + return &volumeAdapter{ + proxy: a.proxy, + name: v.Name, + driverName: a.Name(), + eMount: v.Mountpoint, + }, nil +} + +type volumeAdapter struct { + proxy *volumeDriverProxy + name string + driverName string + eMount string // ephemeral host volume path +} + +type proxyVolume struct { + Name string + Mountpoint string +} + +func (a *volumeAdapter) Name() string { + return a.name +} + +func (a *volumeAdapter) DriverName() string { + return a.driverName +} + +func (a *volumeAdapter) Path() string { + if len(a.eMount) > 0 { + return a.eMount + } + m, _ := a.proxy.Path(a.name) + return m +} + +func (a *volumeAdapter) Mount() (string, error) { + var err error + a.eMount, err = a.proxy.Mount(a.name) + return a.eMount, err +} + +func (a *volumeAdapter) Unmount() error { + return a.proxy.Unmount(a.name) +} diff --git a/vendor/github.com/docker/docker/volume/drivers/extpoint.go b/vendor/github.com/docker/docker/volume/drivers/extpoint.go new file mode 100644 index 00000000..a55da5af --- /dev/null +++ b/vendor/github.com/docker/docker/volume/drivers/extpoint.go @@ -0,0 +1,164 @@ +//go:generate pluginrpc-gen -i $GOFILE -o proxy.go -type volumeDriver -name VolumeDriver + +package volumedrivers + +import ( + "fmt" + "sync" + + "github.com/docker/docker/pkg/locker" + "github.com/docker/docker/pkg/plugins" + "github.com/docker/docker/volume" +) + +// currently created by hand. generation tool would generate this like: +// $ extpoint-gen Driver > volume/extpoint.go + +var drivers = &driverExtpoint{extensions: make(map[string]volume.Driver), driverLock: &locker.Locker{}} + +const extName = "VolumeDriver" + +// NewVolumeDriver returns a driver has the given name mapped on the given client. +func NewVolumeDriver(name string, c client) volume.Driver { + proxy := &volumeDriverProxy{c} + return &volumeDriverAdapter{name: name, proxy: proxy} +} + +type opts map[string]string +type list []*proxyVolume + +// volumeDriver defines the available functions that volume plugins must implement. 
+// This interface is only defined to generate the proxy objects. +// It's not intended to be public or reused. +type volumeDriver interface { + // Create a volume with the given name + Create(name string, opts opts) (err error) + // Remove the volume with the given name + Remove(name string) (err error) + // Get the mountpoint of the given volume + Path(name string) (mountpoint string, err error) + // Mount the given volume and return the mountpoint + Mount(name string) (mountpoint string, err error) + // Unmount the given volume + Unmount(name string) (err error) + // List lists all the volumes known to the driver + List() (volumes list, err error) + // Get retrieves the volume with the requested name + Get(name string) (volume *proxyVolume, err error) +} + +type driverExtpoint struct { + extensions map[string]volume.Driver + sync.Mutex + driverLock *locker.Locker +} + +// Register associates the given driver with the given name, checking if +// the name is already associated +func Register(extension volume.Driver, name string) bool { + if name == "" { + return false + } + + drivers.Lock() + defer drivers.Unlock() + + _, exists := drivers.extensions[name] + if exists { + return false + } + drivers.extensions[name] = extension + return true +} + +// Unregister dissociates the name from its driver, if the association exists. +func Unregister(name string) bool { + drivers.Lock() + defer drivers.Unlock() + + _, exists := drivers.extensions[name] + if !exists { + return false + } + delete(drivers.extensions, name) + return true +} + +// Lookup returns the driver associated with the given name. If a +// driver with the given name has not been registered, it checks if +// there is a VolumeDriver plugin available with the given name. +func Lookup(name string) (volume.Driver, error) { + drivers.driverLock.Lock(name) + defer drivers.driverLock.Unlock(name) + + drivers.Lock() + ext, ok := drivers.extensions[name] + drivers.Unlock() + if ok { + return ext, nil + } + + pl, err := plugins.Get(name, extName) + if err != nil { + return nil, fmt.Errorf("Error looking up volume plugin %s: %v", name, err) + } + + drivers.Lock() + defer drivers.Unlock() + if ext, ok := drivers.extensions[name]; ok { + return ext, nil + } + + d := NewVolumeDriver(name, pl.Client) + drivers.extensions[name] = d + return d, nil +} + +// GetDriver returns a volume driver by its name. +// If the driver is empty, it looks for the local driver. +func GetDriver(name string) (volume.Driver, error) { + if name == "" { + name = volume.DefaultDriverName + } + return Lookup(name) +} + +// GetDriverList returns the list of registered volume drivers. +// If no driver is registered, an empty list is returned.
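+// A minimal sketch (myDriver is a hypothetical volume.Driver implementation):
+//
+//    if Register(myDriver, "fake") {
+//        fmt.Println(GetDriverList()) // includes "fake"
+//    }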
+func GetDriverList() []string { + var driverList []string + drivers.Lock() + for driverName := range drivers.extensions { + driverList = append(driverList, driverName) + } + drivers.Unlock() + return driverList +} + +// GetAllDrivers lists all the registered drivers +func GetAllDrivers() ([]volume.Driver, error) { + plugins, err := plugins.GetAll(extName) + if err != nil { + return nil, err + } + var ds []volume.Driver + + drivers.Lock() + defer drivers.Unlock() + + for _, d := range drivers.extensions { + ds = append(ds, d) + } + + for _, p := range plugins { + ext, ok := drivers.extensions[p.Name] + if ok { + continue + } + + ext = NewVolumeDriver(p.Name, p.Client) + drivers.extensions[p.Name] = ext + ds = append(ds, ext) + } + return ds, nil +} diff --git a/vendor/github.com/docker/docker/volume/drivers/proxy.go b/vendor/github.com/docker/docker/volume/drivers/proxy.go new file mode 100644 index 00000000..5c7cdcb7 --- /dev/null +++ b/vendor/github.com/docker/docker/volume/drivers/proxy.go @@ -0,0 +1,207 @@ +// generated code - DO NOT EDIT + +package volumedrivers + +import "errors" + +type client interface { + Call(string, interface{}, interface{}) error +} + +type volumeDriverProxy struct { + client +} + +type volumeDriverProxyCreateRequest struct { + Name string + Opts opts +} + +type volumeDriverProxyCreateResponse struct { + Err string +} + +func (pp *volumeDriverProxy) Create(name string, opts opts) (err error) { + var ( + req volumeDriverProxyCreateRequest + ret volumeDriverProxyCreateResponse + ) + + req.Name = name + req.Opts = opts + if err = pp.Call("VolumeDriver.Create", req, &ret); err != nil { + return + } + + if ret.Err != "" { + err = errors.New(ret.Err) + } + + return +} + +type volumeDriverProxyRemoveRequest struct { + Name string +} + +type volumeDriverProxyRemoveResponse struct { + Err string +} + +func (pp *volumeDriverProxy) Remove(name string) (err error) { + var ( + req volumeDriverProxyRemoveRequest + ret volumeDriverProxyRemoveResponse + ) + + req.Name = name + if err = pp.Call("VolumeDriver.Remove", req, &ret); err != nil { + return + } + + if ret.Err != "" { + err = errors.New(ret.Err) + } + + return +} + +type volumeDriverProxyPathRequest struct { + Name string +} + +type volumeDriverProxyPathResponse struct { + Mountpoint string + Err string +} + +func (pp *volumeDriverProxy) Path(name string) (mountpoint string, err error) { + var ( + req volumeDriverProxyPathRequest + ret volumeDriverProxyPathResponse + ) + + req.Name = name + if err = pp.Call("VolumeDriver.Path", req, &ret); err != nil { + return + } + + mountpoint = ret.Mountpoint + + if ret.Err != "" { + err = errors.New(ret.Err) + } + + return +} + +type volumeDriverProxyMountRequest struct { + Name string +} + +type volumeDriverProxyMountResponse struct { + Mountpoint string + Err string +} + +func (pp *volumeDriverProxy) Mount(name string) (mountpoint string, err error) { + var ( + req volumeDriverProxyMountRequest + ret volumeDriverProxyMountResponse + ) + + req.Name = name + if err = pp.Call("VolumeDriver.Mount", req, &ret); err != nil { + return + } + + mountpoint = ret.Mountpoint + + if ret.Err != "" { + err = errors.New(ret.Err) + } + + return +} + +type volumeDriverProxyUnmountRequest struct { + Name string +} + +type volumeDriverProxyUnmountResponse struct { + Err string +} + +func (pp *volumeDriverProxy) Unmount(name string) (err error) { + var ( + req volumeDriverProxyUnmountRequest + ret volumeDriverProxyUnmountResponse + ) + + req.Name = name + if err = pp.Call("VolumeDriver.Unmount", 
req, &ret); err != nil { + return + } + + if ret.Err != "" { + err = errors.New(ret.Err) + } + + return +} + +type volumeDriverProxyListRequest struct { +} + +type volumeDriverProxyListResponse struct { + Volumes list + Err string +} + +func (pp *volumeDriverProxy) List() (volumes list, err error) { + var ( + req volumeDriverProxyListRequest + ret volumeDriverProxyListResponse + ) + + if err = pp.Call("VolumeDriver.List", req, &ret); err != nil { + return + } + + volumes = ret.Volumes + + if ret.Err != "" { + err = errors.New(ret.Err) + } + + return +} + +type volumeDriverProxyGetRequest struct { + Name string +} + +type volumeDriverProxyGetResponse struct { + Volume *proxyVolume + Err string +} + +func (pp *volumeDriverProxy) Get(name string) (volume *proxyVolume, err error) { + var ( + req volumeDriverProxyGetRequest + ret volumeDriverProxyGetResponse + ) + + req.Name = name + if err = pp.Call("VolumeDriver.Get", req, &ret); err != nil { + return + } + + volume = ret.Volume + + if ret.Err != "" { + err = errors.New(ret.Err) + } + + return +} diff --git a/vendor/github.com/docker/docker/volume/local/local.go b/vendor/github.com/docker/docker/volume/local/local.go new file mode 100644 index 00000000..0bca731a --- /dev/null +++ b/vendor/github.com/docker/docker/volume/local/local.go @@ -0,0 +1,330 @@ +// Package local provides the default implementation for volumes. It +// is used to mount data volume containers and directories local to +// the host server. +package local + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "sync" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/utils" + "github.com/docker/docker/volume" +) + +// VolumeDataPathName is the name of the directory where the volume data is stored. +// It uses a very distinctive name to avoid collisions migrating data between +// Docker versions. +const ( + VolumeDataPathName = "_data" + volumesPathName = "volumes" +) + +var ( + // ErrNotFound is the typed error returned when the requested volume name can't be found + ErrNotFound = fmt.Errorf("volume not found") + // volumeNameRegex ensures the name assigned to the volume is valid. + // This name is used to create the bind directory, so we need to avoid characters that + // would allow the path to escape the root directory. + volumeNameRegex = utils.RestrictedVolumeNamePattern +) + +type validationError struct { + error +} + +func (validationError) IsValidationError() bool { + return true +} + +type activeMount struct { + count uint64 + mounted bool +} + +// New instantiates a new Root instance with the provided scope. Scope +// is the base path that the Root instance uses to store its +// volumes. The base path is created here if it does not exist.
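+// A minimal usage sketch (hypothetical scope path; 0/0 map to root:root):
+//
+//    r, err := New("/var/lib/docker", 0, 0)
+//    if err == nil {
+//        v, _ := r.Create("myvol", nil)
+//        fmt.Println(v.Path()) // .../volumes/myvol/_data
+//    }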
+func New(scope string, rootUID, rootGID int) (*Root, error) { + rootDirectory := filepath.Join(scope, volumesPathName) + + if err := idtools.MkdirAllAs(rootDirectory, 0700, rootUID, rootGID); err != nil { + return nil, err + } + + r := &Root{ + scope: scope, + path: rootDirectory, + volumes: make(map[string]*localVolume), + rootUID: rootUID, + rootGID: rootGID, + } + + dirs, err := ioutil.ReadDir(rootDirectory) + if err != nil { + return nil, err + } + + mountInfos, err := mount.GetMounts() + if err != nil { + logrus.Debugf("error looking up mounts for local volume cleanup: %v", err) + } + + for _, d := range dirs { + if !d.IsDir() { + continue + } + + name := filepath.Base(d.Name()) + v := &localVolume{ + driverName: r.Name(), + name: name, + path: r.DataPath(name), + } + r.volumes[name] = v + // opts.json lives under the volumes root; allocate opts before unmarshaling + if b, err := ioutil.ReadFile(filepath.Join(r.path, name, "opts.json")); err == nil { + v.opts = &optsConfig{} + if err := json.Unmarshal(b, v.opts); err != nil { + return nil, err + } + + // unmount anything that may still be mounted (for example, from an unclean shutdown) + for _, info := range mountInfos { + if info.Mountpoint == v.path { + mount.Unmount(v.path) + break + } + } + } + } + + return r, nil +} + +// Root implements the Driver interface for the volume package and +// manages the creation/removal of volumes. It uses only standard vfs +// commands to create/remove dirs within its provided scope. +type Root struct { + m sync.Mutex + scope string + path string + volumes map[string]*localVolume + rootUID int + rootGID int +} + +// List lists all the volumes +func (r *Root) List() ([]volume.Volume, error) { + var ls []volume.Volume + r.m.Lock() + for _, v := range r.volumes { + ls = append(ls, v) + } + r.m.Unlock() + return ls, nil +} + +// DataPath returns the constructed path of this volume. +func (r *Root) DataPath(volumeName string) string { + return filepath.Join(r.path, volumeName, VolumeDataPathName) +} + +// Name returns the name of Root, defined in the volume package in the DefaultDriverName constant. +func (r *Root) Name() string { + return volume.DefaultDriverName +} + +// Create creates a new volume.Volume with the provided name, creating +// the underlying directory tree required for this volume in the +// process. +func (r *Root) Create(name string, opts map[string]string) (volume.Volume, error) { + if err := r.validateName(name); err != nil { + return nil, err + } + + r.m.Lock() + defer r.m.Unlock() + + v, exists := r.volumes[name] + if exists { + return v, nil + } + + path := r.DataPath(name) + if err := idtools.MkdirAllAs(path, 0755, r.rootUID, r.rootGID); err != nil { + if os.IsExist(err) { + return nil, fmt.Errorf("volume already exists under %s", filepath.Dir(path)) + } + return nil, err + } + + var err error + defer func() { + if err != nil { + os.RemoveAll(filepath.Dir(path)) + } + }() + + v = &localVolume{ + driverName: r.Name(), + name: name, + path: path, + } + + if opts != nil { + if err = setOpts(v, opts); err != nil { + return nil, err + } + var b []byte + b, err = json.Marshal(v.opts) + if err != nil { + return nil, err + } + if err = ioutil.WriteFile(filepath.Join(filepath.Dir(path), "opts.json"), b, 0600); err != nil { + return nil, err + } + } + + r.volumes[name] = v + return v, nil +} + +// Remove removes the specified volume and all underlying data. If the +// given volume does not belong to this driver, an error is returned. +// The volume is reference counted; if all references are +// not released then the volume is not removed.
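+// For example (continuing the hypothetical New sketch above):
+//
+//    if v, err := r.Get("myvol"); err == nil {
+//        _ = r.Remove(v) // deletes .../volumes/myvol and its _data directory
+//    }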
+func (r *Root) Remove(v volume.Volume) error { + r.m.Lock() + defer r.m.Unlock() + + lv, ok := v.(*localVolume) + if !ok { + return fmt.Errorf("unknown volume type %T", v) + } + + realPath, err := filepath.EvalSymlinks(lv.path) + if err != nil { + if !os.IsNotExist(err) { + return err + } + realPath = filepath.Dir(lv.path) + } + + if !r.scopedPath(realPath) { + return fmt.Errorf("Unable to remove a directory out of the Docker root %s: %s", r.scope, realPath) + } + + if err := removePath(realPath); err != nil { + return err + } + + delete(r.volumes, lv.name) + return removePath(filepath.Dir(lv.path)) +} + +func removePath(path string) error { + if err := os.RemoveAll(path); err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + return nil +} + +// Get looks up the volume for the given name and returns it if found +func (r *Root) Get(name string) (volume.Volume, error) { + r.m.Lock() + v, exists := r.volumes[name] + r.m.Unlock() + if !exists { + return nil, ErrNotFound + } + return v, nil +} + +func (r *Root) validateName(name string) error { + if !volumeNameRegex.MatchString(name) { + return validationError{fmt.Errorf("%q includes invalid characters for a local volume name, only %q are allowed", name, utils.RestrictedNameChars)} + } + return nil +} + +// localVolume implements the Volume interface from the volume package and +// represents the volumes created by Root. +type localVolume struct { + m sync.Mutex + usedCount int + // unique name of the volume + name string + // path is the path on the host where the data lives + path string + // driverName is the name of the driver that created the volume. + driverName string + // opts is the parsed list of options used to create the volume + opts *optsConfig + // active refcounts the active mounts + active activeMount +} + +// Name returns the name of the given Volume. +func (v *localVolume) Name() string { + return v.name +} + +// DriverName returns the driver that created the given Volume. +func (v *localVolume) DriverName() string { + return v.driverName +} + +// Path returns the data location. +func (v *localVolume) Path() string { + return v.path +} + +// Mount mounts the volume's backing device if options are set and returns the data location. +func (v *localVolume) Mount() (string, error) { + v.m.Lock() + defer v.m.Unlock() + if v.opts != nil { + if !v.active.mounted { + if err := v.mount(); err != nil { + return "", err + } + v.active.mounted = true + } + v.active.count++ + } + return v.path, nil +} + +// Unmount decrements the active mount count and unmounts the backing device when the last reference is released. +func (v *localVolume) Unmount() error { + v.m.Lock() + defer v.m.Unlock() + if v.opts != nil { + v.active.count-- + if v.active.count == 0 { + if err := mount.Unmount(v.path); err != nil { + v.active.count++ + return err + } + v.active.mounted = false + } + } + return nil +} + +func validateOpts(opts map[string]string) error { + for opt := range opts { + if !validOpts[opt] { + return validationError{fmt.Errorf("invalid option key: %q", opt)} + } + } + return nil +} diff --git a/vendor/github.com/docker/docker/volume/local/local_unix.go b/vendor/github.com/docker/docker/volume/local/local_unix.go new file mode 100644 index 00000000..2e63777a --- /dev/null +++ b/vendor/github.com/docker/docker/volume/local/local_unix.go @@ -0,0 +1,69 @@ +// +build linux freebsd + +// Package local provides the default implementation for volumes. It +// is used to mount data volume containers and directories local to +// the host server.
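+// A hedged example of the options this driver recognizes (the keys are
+// validated against validOpts below; the NFS values are purely illustrative):
+//
+//    docker volume create -d local --opt type=nfs --opt o=addr=10.0.0.1 --opt device=:/exports vol1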
+package local + +import ( + "fmt" + "path/filepath" + "strings" + + "github.com/docker/docker/pkg/mount" +) + +var ( + oldVfsDir = filepath.Join("vfs", "dir") + + validOpts = map[string]bool{ + "type": true, // specify the filesystem type for mount, e.g. nfs + "o": true, // generic mount options + "device": true, // device to mount from + } +) + +type optsConfig struct { + MountType string + MountOpts string + MountDevice string +} + +// scopedPath verifies that the path where the volume is located +// is under Docker's root directory or one of the valid legacy local paths. +func (r *Root) scopedPath(realPath string) bool { + // Volumes path for Docker version >= 1.7 + if strings.HasPrefix(realPath, filepath.Join(r.scope, volumesPathName)) && realPath != filepath.Join(r.scope, volumesPathName) { + return true + } + + // Volumes path for Docker version < 1.7 + if strings.HasPrefix(realPath, filepath.Join(r.scope, oldVfsDir)) { + return true + } + + return false +} + +func setOpts(v *localVolume, opts map[string]string) error { + if len(opts) == 0 { + return nil + } + if err := validateOpts(opts); err != nil { + return err + } + + v.opts = &optsConfig{ + MountType: opts["type"], + MountOpts: opts["o"], + MountDevice: opts["device"], + } + return nil +} + +func (v *localVolume) mount() error { + if v.opts.MountDevice == "" { + return fmt.Errorf("missing device in volume options") + } + return mount.Mount(v.opts.MountDevice, v.path, v.opts.MountType, v.opts.MountOpts) +} diff --git a/vendor/github.com/docker/docker/volume/store/errors.go b/vendor/github.com/docker/docker/volume/store/errors.go new file mode 100644 index 00000000..7bdfa12b --- /dev/null +++ b/vendor/github.com/docker/docker/volume/store/errors.go @@ -0,0 +1,74 @@ +package store + +import ( + "errors" + "strings" +) + +var ( + // errVolumeInUse is a typed error returned when trying to remove a volume that is currently in use by a container + errVolumeInUse = errors.New("volume is in use") + // errNoSuchVolume is a typed error returned if the requested volume doesn't exist in the volume store + errNoSuchVolume = errors.New("no such volume") + // errInvalidName is a typed error returned when creating a volume with a name that is not valid on the platform + errInvalidName = errors.New("volume name is not valid on this platform") + // errNameConflict is a typed error returned on create when a volume exists with the given name, but for a different driver + errNameConflict = errors.New("conflict: volume name must be unique") +) + +// OpErr is the error type returned by functions in the store package. It describes +// the operation, volume name, and error. +type OpErr struct { + // Err is the error that occurred during the operation. + Err error + // Op is the operation which caused the error, such as "create", or "list". + Op string + // Name is the name of the resource being requested for this op, typically the volume name or the driver name. + Name string + // Refs is the list of references associated with the resource. + Refs []string +} + +// Error satisfies the built-in error interface type.
+func (e *OpErr) Error() string { + if e == nil { + return "" + } + s := e.Op + if e.Name != "" { + s = s + " " + e.Name + } + + s = s + ": " + e.Err.Error() + if len(e.Refs) > 0 { + s = s + " - " + "[" + strings.Join(e.Refs, ", ") + "]" + } + return s +} + +// IsInUse returns a boolean indicating whether the error indicates that a +// volume is in use +func IsInUse(err error) bool { + return isErr(err, errVolumeInUse) +} + +// IsNotExist returns a boolean indicating whether the error indicates that the volume does not exist +func IsNotExist(err error) bool { + return isErr(err, errNoSuchVolume) +} + +// IsNameConflict returns a boolean indicating whether the error indicates that a +// volume name is already taken +func IsNameConflict(err error) bool { + return isErr(err, errNameConflict) +} + +func isErr(err error, expected error) bool { + switch pe := err.(type) { + case nil: + return false + case *OpErr: + err = pe.Err + } + return err == expected +} diff --git a/vendor/github.com/docker/docker/volume/store/store.go b/vendor/github.com/docker/docker/volume/store/store.go new file mode 100644 index 00000000..b2ac9a8c --- /dev/null +++ b/vendor/github.com/docker/docker/volume/store/store.go @@ -0,0 +1,506 @@ +package store + +import ( + "bytes" + "encoding/json" + "os" + "path/filepath" + "sync" + "time" + + "github.com/Sirupsen/logrus" + "github.com/boltdb/bolt" + "github.com/docker/docker/pkg/locker" + "github.com/docker/docker/volume" + "github.com/docker/docker/volume/drivers" +) + +const ( + volumeDataDir = "volumes" + volumeBucketName = "volumes" +) + +type volumeMetadata struct { + Name string + Labels map[string]string +} + +type volumeWithLabels struct { + volume.Volume + labels map[string]string +} + +func (v volumeWithLabels) Labels() map[string]string { + return v.labels +} + +// New initializes a VolumeStore to keep +// reference counting of volumes in the system. 
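+// A minimal sketch (hypothetical root path; pass "" to skip the bolt-backed
+// metadata store):
+//
+//    s, err := New("/var/lib/docker")
+//    if err == nil {
+//        v, _ := s.Create("myvol", "local", nil, nil)
+//        _ = v
+//    }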
+func New(rootPath string) (*VolumeStore, error) { + vs := &VolumeStore{ + locks: &locker.Locker{}, + names: make(map[string]volume.Volume), + refs: make(map[string][]string), + labels: make(map[string]map[string]string), + } + + if rootPath != "" { + // initialize metadata store + volPath := filepath.Join(rootPath, volumeDataDir) + if err := os.MkdirAll(volPath, 0750); err != nil { + return nil, err + } + + dbPath := filepath.Join(volPath, "metadata.db") + + var err error + vs.db, err = bolt.Open(dbPath, 0600, &bolt.Options{Timeout: 1 * time.Second}) + if err != nil { + return nil, err + } + + // initialize volumes bucket + if err := vs.db.Update(func(tx *bolt.Tx) error { + if _, err := tx.CreateBucketIfNotExists([]byte(volumeBucketName)); err != nil { + return err + } + + return nil + }); err != nil { + return nil, err + } + } + + return vs, nil +} + +func (s *VolumeStore) getNamed(name string) (volume.Volume, bool) { + s.globalLock.Lock() + v, exists := s.names[name] + s.globalLock.Unlock() + return v, exists +} + +func (s *VolumeStore) setNamed(v volume.Volume, ref string) { + s.globalLock.Lock() + s.names[v.Name()] = v + if len(ref) > 0 { + s.refs[v.Name()] = append(s.refs[v.Name()], ref) + } + s.globalLock.Unlock() +} + +func (s *VolumeStore) purge(name string) { + s.globalLock.Lock() + delete(s.names, name) + delete(s.refs, name) + delete(s.labels, name) + s.globalLock.Unlock() +} + +// VolumeStore is a struct that stores the list of volumes available and keeps track of their usage counts +type VolumeStore struct { + locks *locker.Locker + globalLock sync.Mutex + // names stores the volume name -> driver name relationship. + // This is used for making lookups faster so we don't have to probe all drivers + names map[string]volume.Volume + // refs stores the volume name and the list of things referencing it + refs map[string][]string + // labels stores volume labels for each volume + labels map[string]map[string]string + db *bolt.DB +} + +// List proxies to all registered volume drivers to get the full list of volumes +// If a driver returns a volume that has name which conflicts with another volume from a different driver, +// the first volume is chosen and the conflicting volume is dropped. +func (s *VolumeStore) List() ([]volume.Volume, []string, error) { + vols, warnings, err := s.list() + if err != nil { + return nil, nil, &OpErr{Err: err, Op: "list"} + } + var out []volume.Volume + + for _, v := range vols { + name := normaliseVolumeName(v.Name()) + + s.locks.Lock(name) + storedV, exists := s.getNamed(name) + // Note: it's not safe to populate the cache here because the volume may have been + // deleted before we acquire a lock on its name + if exists && storedV.DriverName() != v.DriverName() { + logrus.Warnf("Volume name %s already exists for driver %s, not including volume returned by %s", v.Name(), storedV.DriverName(), v.DriverName()) + s.locks.Unlock(v.Name()) + continue + } + + out = append(out, v) + s.locks.Unlock(v.Name()) + } + return out, warnings, nil +} + +// list goes through each volume driver and asks for its list of volumes.
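+// Each driver is queried concurrently; a failing driver contributes a warning
+// instead of an error, and volumes cached from that driver are still returned.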
+func (s *VolumeStore) list() ([]volume.Volume, []string, error) { + drivers, err := volumedrivers.GetAllDrivers() + if err != nil { + return nil, nil, err + } + var ( + ls []volume.Volume + warnings []string + ) + + type vols struct { + vols []volume.Volume + err error + driverName string + } + chVols := make(chan vols, len(drivers)) + + for _, vd := range drivers { + go func(d volume.Driver) { + vs, err := d.List() + if err != nil { + chVols <- vols{driverName: d.Name(), err: &OpErr{Err: err, Name: d.Name(), Op: "list"}} + return + } + chVols <- vols{vols: vs} + }(vd) + } + + badDrivers := make(map[string]struct{}) + for i := 0; i < len(drivers); i++ { + vs := <-chVols + + if vs.err != nil { + warnings = append(warnings, vs.err.Error()) + badDrivers[vs.driverName] = struct{}{} + logrus.Warn(vs.err) + } + ls = append(ls, vs.vols...) + } + + if len(badDrivers) > 0 { + for _, v := range s.names { + if _, exists := badDrivers[v.DriverName()]; exists { + ls = append(ls, v) + } + } + } + return ls, warnings, nil +} + +// CreateWithRef creates a volume with the given name and driver and stores the ref +// This is just like Create() except we store the reference while holding the lock. +// This ensures there's no race between creating a volume and then storing a reference. +func (s *VolumeStore) CreateWithRef(name, driverName, ref string, opts, labels map[string]string) (volume.Volume, error) { + name = normaliseVolumeName(name) + s.locks.Lock(name) + defer s.locks.Unlock(name) + + v, err := s.create(name, driverName, opts, labels) + if err != nil { + return nil, &OpErr{Err: err, Name: name, Op: "create"} + } + + s.setNamed(v, ref) + return v, nil +} + +// Create creates a volume with the given name and driver. +func (s *VolumeStore) Create(name, driverName string, opts, labels map[string]string) (volume.Volume, error) { + name = normaliseVolumeName(name) + s.locks.Lock(name) + defer s.locks.Unlock(name) + + v, err := s.create(name, driverName, opts, labels) + if err != nil { + return nil, &OpErr{Err: err, Name: name, Op: "create"} + } + s.setNamed(v, "") + return v, nil +} + +// create asks the given driver to create a volume with the name/opts. +// If a volume with the name is already known, it will ask the stored driver for the volume. +// If the passed in driver name does not match the driver name which is stored for the given volume name, an error is returned. +// It is expected that callers of this function hold any necessary locks. 
+func (s *VolumeStore) create(name, driverName string, opts, labels map[string]string) (volume.Volume, error) { + // Validate the name in a platform-specific manner + valid, err := volume.IsVolumeNameValid(name) + if err != nil { + return nil, err + } + if !valid { + return nil, &OpErr{Err: errInvalidName, Name: name, Op: "create"} + } + + if v, exists := s.getNamed(name); exists { + if v.DriverName() != driverName && driverName != "" && driverName != volume.DefaultDriverName { + return nil, errNameConflict + } + return v, nil + } + + // Since there isn't a specified driver name, let's see if any of the existing drivers have this volume name + if driverName == "" { + v, _ := s.getVolume(name) + if v != nil { + return v, nil + } + } + + vd, err := volumedrivers.GetDriver(driverName) + + if err != nil { + return nil, &OpErr{Op: "create", Name: name, Err: err} + } + + logrus.Debugf("Registering new volume reference: driver %q, name %q", vd.Name(), name) + + if v, _ := vd.Get(name); v != nil { + return v, nil + } + v, err := vd.Create(name, opts) + if err != nil { + return nil, err + } + s.globalLock.Lock() + s.labels[name] = labels + s.globalLock.Unlock() + + if s.db != nil { + metadata := &volumeMetadata{ + Name: name, + Labels: labels, + } + + volData, err := json.Marshal(metadata) + if err != nil { + return nil, err + } + + if err := s.db.Update(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte(volumeBucketName)) + err := b.Put([]byte(name), volData) + return err + }); err != nil { + return nil, err + } + } + + return volumeWithLabels{v, labels}, nil +} + +// GetWithRef gets a volume with the given name from the passed in driver and stores the ref +// This is just like Get(), but we store the reference while holding the lock. +// This makes sure there are no races between checking for the existence of a volume and adding a reference for it +func (s *VolumeStore) GetWithRef(name, driverName, ref string) (volume.Volume, error) { + name = normaliseVolumeName(name) + s.locks.Lock(name) + defer s.locks.Unlock(name) + + vd, err := volumedrivers.GetDriver(driverName) + if err != nil { + return nil, &OpErr{Err: err, Name: name, Op: "get"} + } + + v, err := vd.Get(name) + if err != nil { + return nil, &OpErr{Err: err, Name: name, Op: "get"} + } + + s.setNamed(v, ref) + if labels, ok := s.labels[name]; ok { + return volumeWithLabels{v, labels}, nil + } + return v, nil +} + +// Get looks if a volume with the given name exists and returns it if so +func (s *VolumeStore) Get(name string) (volume.Volume, error) { + name = normaliseVolumeName(name) + s.locks.Lock(name) + defer s.locks.Unlock(name) + + v, err := s.getVolume(name) + if err != nil { + return nil, &OpErr{Err: err, Name: name, Op: "get"} + } + s.setNamed(v, "") + return v, nil +} + +// getVolume requests the volume, if the driver info is stored it just accesses that driver, +// if the driver is unknown it probes all drivers until it finds the first volume with that name. 
+// it is expected that callers of this function hold any necessary locks
+func (s *VolumeStore) getVolume(name string) (volume.Volume, error) {
+	labels := map[string]string{}
+
+	if s.db != nil {
+		// get meta
+		if err := s.db.Update(func(tx *bolt.Tx) error {
+			b := tx.Bucket([]byte(volumeBucketName))
+			data := b.Get([]byte(name))
+
+			if string(data) == "" {
+				return nil
+			}
+
+			var meta volumeMetadata
+			buf := bytes.NewBuffer(data)
+
+			if err := json.NewDecoder(buf).Decode(&meta); err != nil {
+				return err
+			}
+			labels = meta.Labels
+
+			return nil
+		}); err != nil {
+			return nil, err
+		}
+	}
+
+	logrus.Debugf("Getting volume reference for name: %s", name)
+	s.globalLock.Lock()
+	v, exists := s.names[name]
+	s.globalLock.Unlock()
+	if exists {
+		vd, err := volumedrivers.GetDriver(v.DriverName())
+		if err != nil {
+			return nil, err
+		}
+		vol, err := vd.Get(name)
+		if err != nil {
+			return nil, err
+		}
+		return volumeWithLabels{vol, labels}, nil
+	}
+
+	logrus.Debugf("Probing all drivers for volume with name: %s", name)
+	drivers, err := volumedrivers.GetAllDrivers()
+	if err != nil {
+		return nil, err
+	}
+
+	for _, d := range drivers {
+		v, err := d.Get(name)
+		if err != nil {
+			continue
+		}
+
+		return volumeWithLabels{v, labels}, nil
+	}
+	return nil, errNoSuchVolume
+}
+
+// Remove removes the requested volume. A volume is not removed if it has any refs
+func (s *VolumeStore) Remove(v volume.Volume) error {
+	name := normaliseVolumeName(v.Name())
+	s.locks.Lock(name)
+	defer s.locks.Unlock(name)
+
+	if refs, exists := s.refs[name]; exists && len(refs) > 0 {
+		return &OpErr{Err: errVolumeInUse, Name: v.Name(), Op: "remove", Refs: refs}
+	}
+
+	vd, err := volumedrivers.GetDriver(v.DriverName())
+	if err != nil {
+		// vd is nil here, so report the driver name from the volume itself
+		return &OpErr{Err: err, Name: v.DriverName(), Op: "remove"}
+	}
+
+	logrus.Debugf("Removing volume reference: driver %s, name %s", v.DriverName(), name)
+	vol := withoutLabels(v)
+	if err := vd.Remove(vol); err != nil {
+		return &OpErr{Err: err, Name: name, Op: "remove"}
+	}
+
+	s.purge(name)
+	return nil
+}
+
+// Dereference removes the specified reference to the volume
+func (s *VolumeStore) Dereference(v volume.Volume, ref string) {
+	s.locks.Lock(v.Name())
+	defer s.locks.Unlock(v.Name())
+
+	s.globalLock.Lock()
+	defer s.globalLock.Unlock()
+	var refs []string
+
+	for _, r := range s.refs[v.Name()] {
+		if r != ref {
+			refs = append(refs, r)
+		}
+	}
+	s.refs[v.Name()] = refs
+}
+
+// Refs gets the current list of refs for the given volume
+func (s *VolumeStore) Refs(v volume.Volume) []string {
+	s.locks.Lock(v.Name())
+	defer s.locks.Unlock(v.Name())
+
+	s.globalLock.Lock()
+	defer s.globalLock.Unlock()
+	refs, exists := s.refs[v.Name()]
+	if !exists {
+		return nil
+	}
+
+	refsOut := make([]string, len(refs))
+	copy(refsOut, refs)
+	return refsOut
+}
+
+// FilterByDriver returns the available volumes filtered by driver name
+func (s *VolumeStore) FilterByDriver(name string) ([]volume.Volume, error) {
+	vd, err := volumedrivers.GetDriver(name)
+	if err != nil {
+		return nil, &OpErr{Err: err, Name: name, Op: "list"}
+	}
+	ls, err := vd.List()
+	if err != nil {
+		return nil, &OpErr{Err: err, Name: name, Op: "list"}
+	}
+	return ls, nil
+}
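Putting the reference APIs together: `CreateWithRef`/`GetWithRef` record a holder under the name lock, `Refs` lists the current holders, `Dereference` drops one, and `Remove` refuses while holders remain. A hedged usage sketch follows; it assumes a driver named "local" has already been registered with the `volumedrivers` package this file imports, so treat it as an outline of the lifecycle rather than a ready-to-run program:

```go
package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/volume/store"
)

func main() {
	// New("") keeps everything in memory: no rootPath, no bolt metadata.
	s, err := store.New("")
	if err != nil {
		log.Fatal(err)
	}

	// Create the volume and record container "c1" as a reference holder.
	// Assumes a "local" driver was registered with volumedrivers beforehand.
	v, err := s.CreateWithRef("data", "local", "c1", nil, nil)
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(s.Refs(v)) // [c1]

	// Remove fails while references exist...
	if err := s.Remove(v); err != nil {
		fmt.Println("in use:", err)
	}

	// ...and succeeds once the last holder is dereferenced.
	s.Dereference(v, "c1")
	if err := s.Remove(v); err != nil {
		log.Fatal(err)
	}
}
```

+// FilterByUsed returns the available volumes filtered by whether they are in use or not.
+// `used=true` returns only volumes that are being used, while `used=false` returns
+// only volumes that are not being used.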
+func (s *VolumeStore) FilterByUsed(vols []volume.Volume, used bool) []volume.Volume {
+	return s.filter(vols, func(v volume.Volume) bool {
+		s.locks.Lock(v.Name())
+		l := len(s.refs[v.Name()])
+		s.locks.Unlock(v.Name())
+		return (used && l > 0) || (!used && l == 0)
+	})
+}
+
+// filterFunc defines a function used to filter volumes in the store
+type filterFunc func(vol volume.Volume) bool
+
+// filter returns the available volumes filtered by a filterFunc function
+func (s *VolumeStore) filter(vols []volume.Volume, f filterFunc) []volume.Volume {
+	var ls []volume.Volume
+	for _, v := range vols {
+		if f(v) {
+			ls = append(ls, v)
+		}
+	}
+	return ls
+}
+
+func withoutLabels(v volume.Volume) volume.Volume {
+	if vol, ok := v.(volumeWithLabels); ok {
+		return vol.Volume
+	}
+
+	return v
+}
diff --git a/vendor/github.com/docker/docker/volume/store/store_unix.go b/vendor/github.com/docker/docker/volume/store/store_unix.go
new file mode 100644
index 00000000..319c541d
--- /dev/null
+++ b/vendor/github.com/docker/docker/volume/store/store_unix.go
@@ -0,0 +1,9 @@
+// +build linux freebsd
+
+package store
+
+// normaliseVolumeName is a platform specific function to normalise the name
+// of a volume. This is a no-op on Unix-like platforms
+func normaliseVolumeName(name string) string {
+	return name
+}
diff --git a/vendor/github.com/docker/docker/volume/volume.go b/vendor/github.com/docker/docker/volume/volume.go
new file mode 100644
index 00000000..077cddfb
--- /dev/null
+++ b/vendor/github.com/docker/docker/volume/volume.go
@@ -0,0 +1,133 @@
+package volume

+import (
+	"fmt"
+	"os"
+	"runtime"
+	"strings"
+)
+
+// DefaultDriverName is the driver name used for the driver
+// implemented in the local package.
+const DefaultDriverName string = "local"
+
+// Driver is for creating and removing volumes.
+type Driver interface {
+	// Name returns the name of the volume driver.
+	Name() string
+	// Create makes a new volume with the given name.
+	Create(name string, opts map[string]string) (Volume, error)
+	// Remove deletes the volume.
+	Remove(vol Volume) (err error)
+	// List lists all the volumes the driver has
+	List() ([]Volume, error)
+	// Get retrieves the volume with the requested name
+	Get(name string) (Volume, error)
+}
+
+// Volume is a place to store data. It is backed by a specific driver, and can be mounted.
+type Volume interface {
+	// Name returns the name of the volume
+	Name() string
+	// DriverName returns the name of the driver which owns this volume.
+	DriverName() string
+	// Path returns the absolute path to the volume.
+	Path() string
+	// Mount mounts the volume and returns the absolute path to
+	// where it can be consumed.
+	Mount() (string, error)
+	// Unmount unmounts the volume when it is no longer in use.
+	Unmount() error
+}
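The `Driver` and `Volume` interfaces above are small enough that a toy implementation makes their contract concrete. A hedged sketch of an in-memory driver follows (illustrative only: no persistence and no real mounting):

```go
package memdriver

import (
	"errors"
	"sync"

	"github.com/docker/docker/volume"
)

// memVolume satisfies volume.Volume; it has no backing storage.
type memVolume struct{ name string }

func (v *memVolume) Name() string           { return v.name }
func (v *memVolume) DriverName() string     { return "mem" }
func (v *memVolume) Path() string           { return "/dev/null" }
func (v *memVolume) Mount() (string, error) { return v.Path(), nil }
func (v *memVolume) Unmount() error         { return nil }

// Driver is a toy in-memory volume.Driver; volumes exist only in this map.
type Driver struct {
	mu   sync.Mutex
	vols map[string]*memVolume
}

func New() *Driver { return &Driver{vols: map[string]*memVolume{}} }

func (d *Driver) Name() string { return "mem" }

func (d *Driver) Create(name string, opts map[string]string) (volume.Volume, error) {
	d.mu.Lock()
	defer d.mu.Unlock()
	if v, ok := d.vols[name]; ok {
		return v, nil // Create is idempotent for an existing name
	}
	v := &memVolume{name: name}
	d.vols[name] = v
	return v, nil
}

func (d *Driver) Remove(vol volume.Volume) error {
	d.mu.Lock()
	defer d.mu.Unlock()
	delete(d.vols, vol.Name())
	return nil
}

func (d *Driver) List() ([]volume.Volume, error) {
	d.mu.Lock()
	defer d.mu.Unlock()
	out := make([]volume.Volume, 0, len(d.vols))
	for _, v := range d.vols {
		out = append(out, v)
	}
	return out, nil
}

func (d *Driver) Get(name string) (volume.Volume, error) {
	d.mu.Lock()
	defer d.mu.Unlock()
	if v, ok := d.vols[name]; ok {
		return v, nil
	}
	return nil, errors.New("no such volume")
}
```

+// MountPoint is the intersection point between a volume and a container. It
+// specifies which volume is to be used and where inside a container it should
+// be mounted.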
+type MountPoint struct {
+	Source      string // Container host directory
+	Destination string // Inside the container
+	RW          bool   // True if writable
+	Name        string // Name set by user
+	Driver      string // Volume driver to use
+	Volume      Volume `json:"-"`
+
+	// Note Mode is not used on Windows
+	Mode string `json:"Relabel"` // Originally the field was named `Relabel`
+
+	// Note Propagation is not used on Windows
+	Propagation string // Mount propagation string
+	Named       bool   // specifies if the mountpoint was specified by name
+
+	// Specifies if data should be copied from the container before the first mount
+	// Use a pointer here so we can tell if the user set this value explicitly
+	// This allows us to error out when the user explicitly enabled copy but we can't copy due to the volume being populated
+	CopyData bool `json:"-"`
+}
+
+// Setup sets up a mount point by either mounting the volume if it is
+// configured, or creating the source directory if supplied.
+func (m *MountPoint) Setup() (string, error) {
+	if m.Volume != nil {
+		return m.Volume.Mount()
+	}
+	if len(m.Source) > 0 {
+		if _, err := os.Stat(m.Source); err != nil {
+			if !os.IsNotExist(err) {
+				return "", err
+			}
+			if runtime.GOOS != "windows" { // Windows does not have deprecation issues here
+				if err := os.MkdirAll(m.Source, 0755); err != nil {
+					return "", err
+				}
+			}
+		}
+		return m.Source, nil
+	}
+	return "", fmt.Errorf("Unable to setup mount point, neither source nor volume defined")
+}
+
+// Path returns the path of a volume in a mount point.
+func (m *MountPoint) Path() string {
+	if m.Volume != nil {
+		return m.Volume.Path()
+	}
+	return m.Source
+}
+
+// ParseVolumesFrom ensures that the supplied volumes-from is valid.
+func ParseVolumesFrom(spec string) (string, string, error) {
+	if len(spec) == 0 {
+		return "", "", fmt.Errorf("malformed volumes-from specification: %s", spec)
+	}
+
+	specParts := strings.SplitN(spec, ":", 2)
+	id := specParts[0]
+	mode := "rw"
+
+	if len(specParts) == 2 {
+		mode = specParts[1]
+		if !ValidMountMode(mode) {
+			return "", "", errInvalidMode(mode)
+		}
+		// For now don't allow propagation properties while importing
+		// volumes from data container. These volumes will inherit
+		// the same propagation property as of the original volume
+		// in data container. This probably can be relaxed in future.
+		if HasPropagation(mode) {
+			return "", "", errInvalidMode(mode)
+		}
+		// Do not allow copy modes on volumes-from
+		if _, isSet := getCopyMode(mode); isSet {
+			return "", "", errInvalidMode(mode)
+		}
+	}
+	return id, mode, nil
+}
+
+func errInvalidMode(mode string) error {
+	return fmt.Errorf("invalid mode: %v", mode)
+}
+
+func errInvalidSpec(spec string) error {
+	return fmt.Errorf("Invalid volume specification: '%s'", spec)
+}
diff --git a/vendor/github.com/docker/docker/volume/volume_copy.go b/vendor/github.com/docker/docker/volume/volume_copy.go
new file mode 100644
index 00000000..067537fb
--- /dev/null
+++ b/vendor/github.com/docker/docker/volume/volume_copy.go
@@ -0,0 +1,28 @@
+package volume
+
+import "strings"
+
+const (
+	// DefaultCopyMode is the copy mode used by default for normal/named volumes
+	DefaultCopyMode = true
+)
+
+// {<copy mode>=isEnabled}
+var copyModes = map[string]bool{
+	"nocopy": false,
+}
+
+func copyModeExists(mode string) bool {
+	_, exists := copyModes[mode]
+	return exists
+}
+
+// getCopyMode gets the copy mode from the mode string for mounts
+func getCopyMode(mode string) (bool, bool) {
+	for _, o := range strings.Split(mode, ",") {
+		if isEnabled, exists := copyModes[o]; exists {
+			return isEnabled, true
+		}
+	}
+	return DefaultCopyMode, false
+}
diff --git a/vendor/github.com/docker/docker/volume/volume_propagation_linux.go b/vendor/github.com/docker/docker/volume/volume_propagation_linux.go
new file mode 100644
index 00000000..f5f28205
--- /dev/null
+++ b/vendor/github.com/docker/docker/volume/volume_propagation_linux.go
@@ -0,0 +1,44 @@
+// +build linux
+
+package volume
+
+import (
+	"strings"
+)
+
+// DefaultPropagationMode defines what propagation mode should be used by
+// default if user has not specified one explicitly.
+const DefaultPropagationMode string = "rprivate"
+
+// propagation modes
+var propagationModes = map[string]bool{
+	"private":  true,
+	"rprivate": true,
+	"slave":    true,
+	"rslave":   true,
+	"shared":   true,
+	"rshared":  true,
+}
+
+// GetPropagation extracts and returns the mount propagation mode. If there
+// are no specifications, then by default it is "rprivate".
+func GetPropagation(mode string) string {
+	for _, o := range strings.Split(mode, ",") {
+		if propagationModes[o] {
+			return o
+		}
+	}
+	return DefaultPropagationMode
+}
+
+// HasPropagation checks if there is a valid propagation mode present in
+// passed string. Returns true if a valid propagation mode specifier is
+// present, false otherwise.
+func HasPropagation(mode string) bool {
+	for _, o := range strings.Split(mode, ",") {
+		if propagationModes[o] {
+			return true
+		}
+	}
+	return false
+}
diff --git a/vendor/github.com/docker/docker/volume/volume_propagation_unsupported.go b/vendor/github.com/docker/docker/volume/volume_propagation_unsupported.go
new file mode 100644
index 00000000..0edc89ab
--- /dev/null
+++ b/vendor/github.com/docker/docker/volume/volume_propagation_unsupported.go
@@ -0,0 +1,22 @@
+// +build !linux
+
+package volume
+
+// DefaultPropagationMode is used only on Linux. On other platforms it is an
+// empty string.
+const DefaultPropagationMode string = ""
+
+// propagation modes not supported on this platform.
+var propagationModes = map[string]bool{}
+
+// GetPropagation is not supported. Returns an empty string.
+func GetPropagation(mode string) string {
+	return DefaultPropagationMode
+}
+
+// HasPropagation checks if there is a valid propagation mode present in
+// passed string.
Returns true if a valid propagation mode specifier is +// present, false otherwise. +func HasPropagation(mode string) bool { + return false +} diff --git a/vendor/github.com/docker/docker/volume/volume_unix.go b/vendor/github.com/docker/docker/volume/volume_unix.go new file mode 100644 index 00000000..2520d7c1 --- /dev/null +++ b/vendor/github.com/docker/docker/volume/volume_unix.go @@ -0,0 +1,186 @@ +// +build linux freebsd darwin solaris + +package volume + +import ( + "fmt" + "path/filepath" + "strings" +) + +// read-write modes +var rwModes = map[string]bool{ + "rw": true, + "ro": true, +} + +// label modes +var labelModes = map[string]bool{ + "Z": true, + "z": true, +} + +// BackwardsCompatible decides whether this mount point can be +// used in old versions of Docker or not. +// Only bind mounts and local volumes can be used in old versions of Docker. +func (m *MountPoint) BackwardsCompatible() bool { + return len(m.Source) > 0 || m.Driver == DefaultDriverName +} + +// HasResource checks whether the given absolute path for a container is in +// this mount point. If the relative path starts with `../` then the resource +// is outside of this mount point, but we can't simply check for this prefix +// because it misses `..` which is also outside of the mount, so check both. +func (m *MountPoint) HasResource(absolutePath string) bool { + relPath, err := filepath.Rel(m.Destination, absolutePath) + return err == nil && relPath != ".." && !strings.HasPrefix(relPath, fmt.Sprintf("..%c", filepath.Separator)) +} + +// ParseMountSpec validates the configuration of mount information is valid. +func ParseMountSpec(spec, volumeDriver string) (*MountPoint, error) { + spec = filepath.ToSlash(spec) + + mp := &MountPoint{ + RW: true, + Propagation: DefaultPropagationMode, + } + if strings.Count(spec, ":") > 2 { + return nil, errInvalidSpec(spec) + } + + arr := strings.SplitN(spec, ":", 3) + if arr[0] == "" { + return nil, errInvalidSpec(spec) + } + + switch len(arr) { + case 1: + // Just a destination path in the container + mp.Destination = filepath.Clean(arr[0]) + case 2: + if isValid := ValidMountMode(arr[1]); isValid { + // Destination + Mode is not a valid volume - volumes + // cannot include a mode. eg /foo:rw + return nil, errInvalidSpec(spec) + } + // Host Source Path or Name + Destination + mp.Source = arr[0] + mp.Destination = arr[1] + case 3: + // HostSourcePath+DestinationPath+Mode + mp.Source = arr[0] + mp.Destination = arr[1] + mp.Mode = arr[2] // Mode field is used by SELinux to decide whether to apply label + if !ValidMountMode(mp.Mode) { + return nil, errInvalidMode(mp.Mode) + } + mp.RW = ReadWrite(mp.Mode) + mp.Propagation = GetPropagation(mp.Mode) + default: + return nil, errInvalidSpec(spec) + } + + //validate the volumes destination path + mp.Destination = filepath.Clean(mp.Destination) + if !filepath.IsAbs(mp.Destination) { + return nil, fmt.Errorf("Invalid volume destination path: '%s' mount path must be absolute.", mp.Destination) + } + + // Destination cannot be "/" + if mp.Destination == "/" { + return nil, fmt.Errorf("Invalid specification: destination can't be '/' in '%s'", spec) + } + + name, source := ParseVolumeSource(mp.Source) + if len(source) == 0 { + mp.Source = "" // Clear it out as we previously assumed it was not a name + mp.Driver = volumeDriver + // Named volumes can't have propagation properties specified. + // Their defaults will be decided by docker. This is just a + // safeguard. 
Don't want to get into situations where named + // volumes were mounted as '[r]shared' inside container and + // container does further mounts under that volume and these + // mounts become visible on host and later original volume + // cleanup becomes an issue if container does not unmount + // submounts explicitly. + if HasPropagation(mp.Mode) { + return nil, errInvalidSpec(spec) + } + } else { + mp.Source = filepath.Clean(source) + } + + copyData, isSet := getCopyMode(mp.Mode) + // do not allow copy modes on binds + if len(name) == 0 && isSet { + return nil, errInvalidMode(mp.Mode) + } + + mp.CopyData = copyData + mp.Name = name + + return mp, nil +} + +// ParseVolumeSource parses the origin sources that's mounted into the container. +// It returns a name and a source. It looks to see if the spec passed in +// is an absolute file. If it is, it assumes the spec is a source. If not, +// it assumes the spec is a name. +func ParseVolumeSource(spec string) (string, string) { + if !filepath.IsAbs(spec) { + return spec, "" + } + return "", spec +} + +// IsVolumeNameValid checks a volume name in a platform specific manner. +func IsVolumeNameValid(name string) (bool, error) { + return true, nil +} + +// ValidMountMode will make sure the mount mode is valid. +// returns if it's a valid mount mode or not. +func ValidMountMode(mode string) bool { + rwModeCount := 0 + labelModeCount := 0 + propagationModeCount := 0 + copyModeCount := 0 + + for _, o := range strings.Split(mode, ",") { + switch { + case rwModes[o]: + rwModeCount++ + case labelModes[o]: + labelModeCount++ + case propagationModes[o]: + propagationModeCount++ + case copyModeExists(o): + copyModeCount++ + default: + return false + } + } + + // Only one string for each mode is allowed. + if rwModeCount > 1 || labelModeCount > 1 || propagationModeCount > 1 || copyModeCount > 1 { + return false + } + return true +} + +// ReadWrite tells you if a mode string is a valid read-write mode or not. +// If there are no specifications w.r.t read write mode, then by default +// it returns true. +func ReadWrite(mode string) bool { + if !ValidMountMode(mode) { + return false + } + + for _, o := range strings.Split(mode, ",") { + if o == "ro" { + return false + } + } + + return true +} diff --git a/vendor/github.com/docker/engine-api/types/events/events.go b/vendor/github.com/docker/engine-api/types/events/events.go new file mode 100644 index 00000000..c5987aaf --- /dev/null +++ b/vendor/github.com/docker/engine-api/types/events/events.go @@ -0,0 +1,38 @@ +package events + +const ( + // ContainerEventType is the event type that containers generate + ContainerEventType = "container" + // ImageEventType is the event type that images generate + ImageEventType = "image" + // VolumeEventType is the event type that volumes generate + VolumeEventType = "volume" + // NetworkEventType is the event type that networks generate + NetworkEventType = "network" +) + +// Actor describes something that generates events, +// like a container, or a network, or a volume. +// It has a defined name and a set or attributes. +// The container attributes are its labels, other actors +// can generate these attributes from other properties. +type Actor struct { + ID string + Attributes map[string]string +} + +// Message represents the information an event contains +type Message struct { + // Deprecated information from JSONMessage. + // With data only in container events. 
+	Status string `json:"status,omitempty"`
+	ID     string `json:"id,omitempty"`
+	From   string `json:"from,omitempty"`
+
+	Type   string
+	Action string
+	Actor  Actor
+
+	Time     int64 `json:"time,omitempty"`
+	TimeNano int64 `json:"timeNano,omitempty"`
+}
diff --git a/vendor/github.com/docker/engine-api/types/versions/README.md b/vendor/github.com/docker/engine-api/types/versions/README.md
new file mode 100644
index 00000000..76c516e6
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/types/versions/README.md
@@ -0,0 +1,14 @@
+## Legacy API type versions
+
+This package includes types for legacy API versions. The stable version of the API types live in `api/types/*.go`.
+
+Consider moving a type here when you need to keep backwards compatibility in the API. These legacy types are organized by the latest API version they appear in. For instance, types in the `v1p19` package are valid for API versions below or equal to `1.19`. Types in the `v1p20` package are valid for the API version `1.20`, since the versions below that will use the legacy types in `v1p19`.
+
+### Package name conventions
+
+The package name convention is to use `v` as a prefix for the version number and `p` (patch) as a separator. We use this nomenclature due to a few restrictions in the Go package name convention:
+
+1. We cannot use `.` because it's interpreted by the language; think of `v1.20.CallFunction`.
+2. We cannot use `_` because golint complains about it. The code is actually valid, but it arguably looks more awkward: `v1_20.CallFunction`.
+
+For instance, if you want to modify a type that was available in version `1.21` of the API but will have different fields in version `1.22`, create a new package under `api/types/versions/v1p21`.
diff --git a/vendor/github.com/docker/engine-api/types/versions/v1p19/types.go b/vendor/github.com/docker/engine-api/types/versions/v1p19/types.go
new file mode 100644
index 00000000..4ed43358
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/types/versions/v1p19/types.go
@@ -0,0 +1,35 @@
+// Package v1p19 provides specific API types for the API version 1, patch 19.
+package v1p19
+
+import (
+	"github.com/docker/engine-api/types"
+	"github.com/docker/engine-api/types/container"
+	"github.com/docker/engine-api/types/versions/v1p20"
+	"github.com/docker/go-connections/nat"
+)
+
+// ContainerJSON is a backcompatibility struct for APIs prior to 1.20.
+// Note this is not used by the Windows daemon.
+type ContainerJSON struct {
+	*types.ContainerJSONBase
+	Volumes         map[string]string
+	VolumesRW       map[string]bool
+	Config          *ContainerConfig
+	NetworkSettings *v1p20.NetworkSettings
+}
+
+// ContainerConfig is a backcompatibility struct for APIs prior to 1.20.
+type ContainerConfig struct {
+	*container.Config
+
+	MacAddress      string
+	NetworkDisabled bool
+	ExposedPorts    map[nat.Port]struct{}
+
+	// backward compatibility, they now live in HostConfig
+	VolumeDriver string
+	Memory       int64
+	MemorySwap   int64
+	CPUShares    int64  `json:"CpuShares"`
+	CPUSet       string `json:"Cpuset"`
+}
diff --git a/vendor/github.com/docker/engine-api/types/versions/v1p20/types.go b/vendor/github.com/docker/engine-api/types/versions/v1p20/types.go
new file mode 100644
index 00000000..ed800061
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/types/versions/v1p20/types.go
@@ -0,0 +1,40 @@
+// Package v1p20 provides specific API types for the API version 1, patch 20.
+package v1p20 + +import ( + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/container" + "github.com/docker/go-connections/nat" +) + +// ContainerJSON is a backcompatibility struct for the API 1.20 +type ContainerJSON struct { + *types.ContainerJSONBase + Mounts []types.MountPoint + Config *ContainerConfig + NetworkSettings *NetworkSettings +} + +// ContainerConfig is a backcompatibility struct used in ContainerJSON for the API 1.20 +type ContainerConfig struct { + *container.Config + + MacAddress string + NetworkDisabled bool + ExposedPorts map[nat.Port]struct{} + + // backward compatibility, they now live in HostConfig + VolumeDriver string +} + +// StatsJSON is a backcompatibility struct used in Stats for API prior to 1.21 +type StatsJSON struct { + types.Stats + Network types.NetworkStats `json:"network,omitempty"` +} + +// NetworkSettings is a backward compatible struct for APIs prior to 1.21 +type NetworkSettings struct { + types.NetworkSettingsBase + types.DefaultNetworkSettings +} diff --git a/vendor/github.com/docker/libtrust/CONTRIBUTING.md b/vendor/github.com/docker/libtrust/CONTRIBUTING.md new file mode 100644 index 00000000..05be0f8a --- /dev/null +++ b/vendor/github.com/docker/libtrust/CONTRIBUTING.md @@ -0,0 +1,13 @@ +# Contributing to libtrust + +Want to hack on libtrust? Awesome! Here are instructions to get you +started. + +libtrust is a part of the [Docker](https://www.docker.com) project, and follows +the same rules and principles. If you're already familiar with the way +Docker does things, you'll feel right at home. + +Otherwise, go read +[Docker's contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md). + +Happy hacking! diff --git a/vendor/github.com/docker/libtrust/LICENSE b/vendor/github.com/docker/libtrust/LICENSE new file mode 100644 index 00000000..27448585 --- /dev/null +++ b/vendor/github.com/docker/libtrust/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory,
+   whether in tort (including negligence), contract, or otherwise,
+   unless required by applicable law (such as deliberate and grossly
+   negligent acts) or agreed to in writing, shall any Contributor be
+   liable to You for damages, including any direct, indirect, special,
+   incidental, or consequential damages of any character arising as a
+   result of this License or out of the use or inability to use the
+   Work (including but not limited to damages for loss of goodwill,
+   work stoppage, computer failure or malfunction, or any and all
+   other commercial damages or losses), even if such Contributor
+   has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+   the Work or Derivative Works thereof, You may choose to offer,
+   and charge a fee for, acceptance of support, warranty, indemnity,
+   or other liability obligations and/or rights consistent with this
+   License. However, in accepting such obligations, You may act only
+   on Your own behalf and on Your sole responsibility, not on behalf
+   of any other Contributor, and only if You agree to indemnify,
+   defend, and hold each Contributor harmless for any liability
+   incurred by, or claims asserted against, such Contributor by reason
+   of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   Copyright 2014 Docker, Inc.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/docker/libtrust/MAINTAINERS b/vendor/github.com/docker/libtrust/MAINTAINERS
new file mode 100644
index 00000000..9768175f
--- /dev/null
+++ b/vendor/github.com/docker/libtrust/MAINTAINERS
@@ -0,0 +1,3 @@
+Solomon Hykes
+Josh Hawn (github: jlhawn)
+Derek McGowan (github: dmcgowan)
diff --git a/vendor/github.com/docker/libtrust/README.md b/vendor/github.com/docker/libtrust/README.md
new file mode 100644
index 00000000..8e7db381
--- /dev/null
+++ b/vendor/github.com/docker/libtrust/README.md
@@ -0,0 +1,18 @@
+# libtrust
+
+Libtrust is a library for managing authentication and authorization using public key cryptography.
+
+Authentication is handled using the identity attached to the public key.
+Libtrust provides multiple methods to prove possession of the private key associated with an identity.
+ - TLS x509 certificates
+ - Signature verification
+ - Key Challenge
+
+Authorization and access control is managed through a distributed trust graph.
+Trust servers are used as the authorities of the trust graph and allow caching portions of the graph for faster access.
+
+## Copyright and license
+
+Code and documentation copyright 2014 Docker, Inc. Code released under the Apache 2.0 license.
+Docs released under Creative Commons.
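As a taste of the API added below, here is a hedged sketch that pairs two of the vendored functions, `GenerateECP256PrivateKey` and `GenerateSelfSignedServerCert`; the host name and IP address are placeholders:

```go
package main

import (
	"fmt"
	"log"
	"net"

	"github.com/docker/libtrust"
)

func main() {
	// A fresh P-256 key; its KeyID doubles as the certificate CommonName.
	key, err := libtrust.GenerateECP256PrivateKey()
	if err != nil {
		log.Fatal(err)
	}

	// Self-signed certificate valid for the given domains and IPs.
	cert, err := libtrust.GenerateSelfSignedServerCert(
		key,
		[]string{"example.local"},
		[]net.IP{net.ParseIP("127.0.0.1")},
	)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("issued to:", cert.Subject.CommonName)
}
```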
+ diff --git a/vendor/github.com/docker/libtrust/certificates.go b/vendor/github.com/docker/libtrust/certificates.go new file mode 100644 index 00000000..3dcca33c --- /dev/null +++ b/vendor/github.com/docker/libtrust/certificates.go @@ -0,0 +1,175 @@ +package libtrust + +import ( + "crypto/rand" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "fmt" + "io/ioutil" + "math/big" + "net" + "time" +) + +type certTemplateInfo struct { + commonName string + domains []string + ipAddresses []net.IP + isCA bool + clientAuth bool + serverAuth bool +} + +func generateCertTemplate(info *certTemplateInfo) *x509.Certificate { + // Generate a certificate template which is valid from the past week to + // 10 years from now. The usage of the certificate depends on the + // specified fields in the given certTempInfo object. + var ( + keyUsage x509.KeyUsage + extKeyUsage []x509.ExtKeyUsage + ) + + if info.isCA { + keyUsage = x509.KeyUsageCertSign + } + + if info.clientAuth { + extKeyUsage = append(extKeyUsage, x509.ExtKeyUsageClientAuth) + } + + if info.serverAuth { + extKeyUsage = append(extKeyUsage, x509.ExtKeyUsageServerAuth) + } + + return &x509.Certificate{ + SerialNumber: big.NewInt(0), + Subject: pkix.Name{ + CommonName: info.commonName, + }, + NotBefore: time.Now().Add(-time.Hour * 24 * 7), + NotAfter: time.Now().Add(time.Hour * 24 * 365 * 10), + DNSNames: info.domains, + IPAddresses: info.ipAddresses, + IsCA: info.isCA, + KeyUsage: keyUsage, + ExtKeyUsage: extKeyUsage, + BasicConstraintsValid: info.isCA, + } +} + +func generateCert(pub PublicKey, priv PrivateKey, subInfo, issInfo *certTemplateInfo) (cert *x509.Certificate, err error) { + pubCertTemplate := generateCertTemplate(subInfo) + privCertTemplate := generateCertTemplate(issInfo) + + certDER, err := x509.CreateCertificate( + rand.Reader, pubCertTemplate, privCertTemplate, + pub.CryptoPublicKey(), priv.CryptoPrivateKey(), + ) + if err != nil { + return nil, fmt.Errorf("failed to create certificate: %s", err) + } + + cert, err = x509.ParseCertificate(certDER) + if err != nil { + return nil, fmt.Errorf("failed to parse certificate: %s", err) + } + + return +} + +// GenerateSelfSignedServerCert creates a self-signed certificate for the +// given key which is to be used for TLS servers with the given domains and +// IP addresses. +func GenerateSelfSignedServerCert(key PrivateKey, domains []string, ipAddresses []net.IP) (*x509.Certificate, error) { + info := &certTemplateInfo{ + commonName: key.KeyID(), + domains: domains, + ipAddresses: ipAddresses, + serverAuth: true, + } + + return generateCert(key.PublicKey(), key, info, info) +} + +// GenerateSelfSignedClientCert creates a self-signed certificate for the +// given key which is to be used for TLS clients. +func GenerateSelfSignedClientCert(key PrivateKey) (*x509.Certificate, error) { + info := &certTemplateInfo{ + commonName: key.KeyID(), + clientAuth: true, + } + + return generateCert(key.PublicKey(), key, info, info) +} + +// GenerateCACert creates a certificate which can be used as a trusted +// certificate authority. +func GenerateCACert(signer PrivateKey, trustedKey PublicKey) (*x509.Certificate, error) { + subjectInfo := &certTemplateInfo{ + commonName: trustedKey.KeyID(), + isCA: true, + } + issuerInfo := &certTemplateInfo{ + commonName: signer.KeyID(), + } + + return generateCert(trustedKey, signer, subjectInfo, issuerInfo) +} + +// GenerateCACertPool creates a certificate authority pool to be used for a +// TLS configuration. 
Any self-signed certificates issued by the specified +// trusted keys will be verified during a TLS handshake +func GenerateCACertPool(signer PrivateKey, trustedKeys []PublicKey) (*x509.CertPool, error) { + certPool := x509.NewCertPool() + + for _, trustedKey := range trustedKeys { + cert, err := GenerateCACert(signer, trustedKey) + if err != nil { + return nil, fmt.Errorf("failed to generate CA certificate: %s", err) + } + + certPool.AddCert(cert) + } + + return certPool, nil +} + +// LoadCertificateBundle loads certificates from the given file. The file should be pem encoded +// containing one or more certificates. The expected pem type is "CERTIFICATE". +func LoadCertificateBundle(filename string) ([]*x509.Certificate, error) { + b, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + certificates := []*x509.Certificate{} + var block *pem.Block + block, b = pem.Decode(b) + for ; block != nil; block, b = pem.Decode(b) { + if block.Type == "CERTIFICATE" { + cert, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return nil, err + } + certificates = append(certificates, cert) + } else { + return nil, fmt.Errorf("invalid pem block type: %s", block.Type) + } + } + + return certificates, nil +} + +// LoadCertificatePool loads a CA pool from the given file. The file should be pem encoded +// containing one or more certificates. The expected pem type is "CERTIFICATE". +func LoadCertificatePool(filename string) (*x509.CertPool, error) { + certs, err := LoadCertificateBundle(filename) + if err != nil { + return nil, err + } + pool := x509.NewCertPool() + for _, cert := range certs { + pool.AddCert(cert) + } + return pool, nil +} diff --git a/vendor/github.com/docker/libtrust/doc.go b/vendor/github.com/docker/libtrust/doc.go new file mode 100644 index 00000000..ec5d2159 --- /dev/null +++ b/vendor/github.com/docker/libtrust/doc.go @@ -0,0 +1,9 @@ +/* +Package libtrust provides an interface for managing authentication and +authorization using public key cryptography. Authentication is handled +using the identity attached to the public key and verified through TLS +x509 certificates, a key challenge, or signature. Authorization and +access control is managed through a trust graph distributed between +both remote trust servers and locally cached and managed data. +*/ +package libtrust diff --git a/vendor/github.com/docker/libtrust/ec_key.go b/vendor/github.com/docker/libtrust/ec_key.go new file mode 100644 index 00000000..00bbe4b3 --- /dev/null +++ b/vendor/github.com/docker/libtrust/ec_key.go @@ -0,0 +1,428 @@ +package libtrust + +import ( + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/x509" + "encoding/json" + "encoding/pem" + "errors" + "fmt" + "io" + "math/big" +) + +/* + * EC DSA PUBLIC KEY + */ + +// ecPublicKey implements a libtrust.PublicKey using elliptic curve digital +// signature algorithms. 
+type ecPublicKey struct {
+	*ecdsa.PublicKey
+	curveName          string
+	signatureAlgorithm *signatureAlgorithm
+	extended           map[string]interface{}
+}
+
+func fromECPublicKey(cryptoPublicKey *ecdsa.PublicKey) (*ecPublicKey, error) {
+	curve := cryptoPublicKey.Curve
+
+	switch {
+	case curve == elliptic.P256():
+		return &ecPublicKey{cryptoPublicKey, "P-256", es256, map[string]interface{}{}}, nil
+	case curve == elliptic.P384():
+		return &ecPublicKey{cryptoPublicKey, "P-384", es384, map[string]interface{}{}}, nil
+	case curve == elliptic.P521():
+		return &ecPublicKey{cryptoPublicKey, "P-521", es512, map[string]interface{}{}}, nil
+	default:
+		return nil, errors.New("unsupported elliptic curve")
+	}
+}
+
+// KeyType returns the key type for elliptic curve keys, i.e., "EC".
+func (k *ecPublicKey) KeyType() string {
+	return "EC"
+}
+
+// CurveName returns the elliptic curve identifier.
+// Possible values are "P-256", "P-384", and "P-521".
+func (k *ecPublicKey) CurveName() string {
+	return k.curveName
+}
+
+// KeyID returns a distinct identifier which is unique to this Public Key.
+func (k *ecPublicKey) KeyID() string {
+	return keyIDFromCryptoKey(k)
+}
+
+func (k *ecPublicKey) String() string {
+	return fmt.Sprintf("EC Public Key <%s>", k.KeyID())
+}
+
+// Verify verifies the signature of the data in the io.Reader using this
+// PublicKey. The alg parameter should identify the digital signature
+// algorithm which was used to produce the signature and should be supported
+// by this public key. Returns a nil error if the signature is valid.
+func (k *ecPublicKey) Verify(data io.Reader, alg string, signature []byte) error {
+	// For EC keys there is only one supported signature algorithm depending
+	// on the curve parameters.
+	if k.signatureAlgorithm.HeaderParam() != alg {
+		return fmt.Errorf("unable to verify signature: EC Public Key with curve %q does not support signature algorithm %q", k.curveName, alg)
+	}
+
+	// signature is the concatenation of (r, s), base64Url encoded.
+	sigLength := len(signature)
+	expectedOctetLength := 2 * ((k.Params().BitSize + 7) >> 3)
+	if sigLength != expectedOctetLength {
+		return fmt.Errorf("signature length is %d octets long, should be %d", sigLength, expectedOctetLength)
+	}
+
+	rBytes, sBytes := signature[:sigLength/2], signature[sigLength/2:]
+	r := new(big.Int).SetBytes(rBytes)
+	s := new(big.Int).SetBytes(sBytes)
+
+	hasher := k.signatureAlgorithm.HashID().New()
+	_, err := io.Copy(hasher, data)
+	if err != nil {
+		return fmt.Errorf("error reading data to sign: %s", err)
+	}
+	hash := hasher.Sum(nil)
+
+	if !ecdsa.Verify(k.PublicKey, hash, r, s) {
+		return errors.New("invalid signature")
+	}
+
+	return nil
+}
+
+// CryptoPublicKey returns the internal object which can be used as a
+// crypto.PublicKey for use with other standard library operations. The type
+// is either *rsa.PublicKey or *ecdsa.PublicKey.
+func (k *ecPublicKey) CryptoPublicKey() crypto.PublicKey {
+	return k.PublicKey
+}
+
+func (k *ecPublicKey) toMap() map[string]interface{} {
+	jwk := make(map[string]interface{})
+	for k, v := range k.extended {
+		jwk[k] = v
+	}
+	jwk["kty"] = k.KeyType()
+	jwk["kid"] = k.KeyID()
+	jwk["crv"] = k.CurveName()
+
+	xBytes := k.X.Bytes()
+	yBytes := k.Y.Bytes()
+	octetLength := (k.Params().BitSize + 7) >> 3
+	// MUST include leading zeros in the output so that x, y are each
+	// *octetLength* bytes long.
+	xBuf := make([]byte, octetLength-len(xBytes), octetLength)
+	yBuf := make([]byte, octetLength-len(yBytes), octetLength)
+	xBuf = append(xBuf, xBytes...)
+ yBuf = append(yBuf, yBytes...) + + jwk["x"] = joseBase64UrlEncode(xBuf) + jwk["y"] = joseBase64UrlEncode(yBuf) + + return jwk +} + +// MarshalJSON serializes this Public Key using the JWK JSON serialization format for +// elliptic curve keys. +func (k *ecPublicKey) MarshalJSON() (data []byte, err error) { + return json.Marshal(k.toMap()) +} + +// PEMBlock serializes this Public Key to DER-encoded PKIX format. +func (k *ecPublicKey) PEMBlock() (*pem.Block, error) { + derBytes, err := x509.MarshalPKIXPublicKey(k.PublicKey) + if err != nil { + return nil, fmt.Errorf("unable to serialize EC PublicKey to DER-encoded PKIX format: %s", err) + } + k.extended["kid"] = k.KeyID() // For display purposes. + return createPemBlock("PUBLIC KEY", derBytes, k.extended) +} + +func (k *ecPublicKey) AddExtendedField(field string, value interface{}) { + k.extended[field] = value +} + +func (k *ecPublicKey) GetExtendedField(field string) interface{} { + v, ok := k.extended[field] + if !ok { + return nil + } + return v +} + +func ecPublicKeyFromMap(jwk map[string]interface{}) (*ecPublicKey, error) { + // JWK key type (kty) has already been determined to be "EC". + // Need to extract 'crv', 'x', 'y', and 'kid' and check for + // consistency. + + // Get the curve identifier value. + crv, err := stringFromMap(jwk, "crv") + if err != nil { + return nil, fmt.Errorf("JWK EC Public Key curve identifier: %s", err) + } + + var ( + curve elliptic.Curve + sigAlg *signatureAlgorithm + ) + + switch { + case crv == "P-256": + curve = elliptic.P256() + sigAlg = es256 + case crv == "P-384": + curve = elliptic.P384() + sigAlg = es384 + case crv == "P-521": + curve = elliptic.P521() + sigAlg = es512 + default: + return nil, fmt.Errorf("JWK EC Public Key curve identifier not supported: %q\n", crv) + } + + // Get the X and Y coordinates for the public key point. + xB64Url, err := stringFromMap(jwk, "x") + if err != nil { + return nil, fmt.Errorf("JWK EC Public Key x-coordinate: %s", err) + } + x, err := parseECCoordinate(xB64Url, curve) + if err != nil { + return nil, fmt.Errorf("JWK EC Public Key x-coordinate: %s", err) + } + + yB64Url, err := stringFromMap(jwk, "y") + if err != nil { + return nil, fmt.Errorf("JWK EC Public Key y-coordinate: %s", err) + } + y, err := parseECCoordinate(yB64Url, curve) + if err != nil { + return nil, fmt.Errorf("JWK EC Public Key y-coordinate: %s", err) + } + + key := &ecPublicKey{ + PublicKey: &ecdsa.PublicKey{Curve: curve, X: x, Y: y}, + curveName: crv, signatureAlgorithm: sigAlg, + } + + // Key ID is optional too, but if it exists, it should match the key. + _, ok := jwk["kid"] + if ok { + kid, err := stringFromMap(jwk, "kid") + if err != nil { + return nil, fmt.Errorf("JWK EC Public Key ID: %s", err) + } + if kid != key.KeyID() { + return nil, fmt.Errorf("JWK EC Public Key ID does not match: %s", kid) + } + } + + key.extended = jwk + + return key, nil +} + +/* + * EC DSA PRIVATE KEY + */ + +// ecPrivateKey implements a JWK Private Key using elliptic curve digital signature +// algorithms. +type ecPrivateKey struct { + ecPublicKey + *ecdsa.PrivateKey +} + +func fromECPrivateKey(cryptoPrivateKey *ecdsa.PrivateKey) (*ecPrivateKey, error) { + publicKey, err := fromECPublicKey(&cryptoPrivateKey.PublicKey) + if err != nil { + return nil, err + } + + return &ecPrivateKey{*publicKey, cryptoPrivateKey}, nil +} + +// PublicKey returns the Public Key data associated with this Private Key. 
+func (k *ecPrivateKey) PublicKey() PublicKey {
+	return &k.ecPublicKey
+}
+
+func (k *ecPrivateKey) String() string {
+	return fmt.Sprintf("EC Private Key <%s>", k.KeyID())
+}
+
+// Sign signs the data read from the io.Reader using a signature algorithm supported
+// by the elliptic curve private key. If the specified hashing algorithm is
+// supported by this key, that hash function is used to generate the signature;
+// otherwise the default hashing algorithm for this key is used. Returns
+// the signature and the name of the JWK signature algorithm used, e.g.,
+// "ES256", "ES384", "ES512".
+func (k *ecPrivateKey) Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error) {
+	// Generate a signature of the data using the internal alg.
+	// The given hashID is only a suggestion: since EC keys support only
+	// one signature/hash algorithm for a given curve name, we disregard it
+	// for the elliptic curve JWK signature implementation.
+	hasher := k.signatureAlgorithm.HashID().New()
+	_, err = io.Copy(hasher, data)
+	if err != nil {
+		return nil, "", fmt.Errorf("error reading data to sign: %s", err)
+	}
+	hash := hasher.Sum(nil)
+
+	r, s, err := ecdsa.Sign(rand.Reader, k.PrivateKey, hash)
+	if err != nil {
+		return nil, "", fmt.Errorf("error producing signature: %s", err)
+	}
+	rBytes, sBytes := r.Bytes(), s.Bytes()
+	octetLength := (k.ecPublicKey.Params().BitSize + 7) >> 3
+	// MUST include leading zeros in the output
+	rBuf := make([]byte, octetLength-len(rBytes), octetLength)
+	sBuf := make([]byte, octetLength-len(sBytes), octetLength)
+
+	rBuf = append(rBuf, rBytes...)
+	sBuf = append(sBuf, sBytes...)
+
+	signature = append(rBuf, sBuf...)
+	alg = k.signatureAlgorithm.HeaderParam()
+
+	return
+}
+
+// CryptoPrivateKey returns the internal object which can be used as a
+// crypto.PrivateKey for use with other standard library operations. The type
+// is either *rsa.PrivateKey or *ecdsa.PrivateKey.
+func (k *ecPrivateKey) CryptoPrivateKey() crypto.PrivateKey {
+	return k.PrivateKey
+}
+
+func (k *ecPrivateKey) toMap() map[string]interface{} {
+	jwk := k.ecPublicKey.toMap()
+
+	dBytes := k.D.Bytes()
+	// The length of this octet string MUST be ceiling(log-base-2(n)/8)
+	// octets (where n is the order of the curve). This is because the private
+	// key d must be in the interval [1, n-1] so the bitlength of d should be
+	// no larger than the bitlength of n-1. The easiest way to find the octet
+	// length is to take bitlength(n-1), add 7 to force a carry, and shift this
+	// bit sequence right by 3, which is essentially dividing by 8 and adding
+	// 1 if there is any remainder. Thus, the private key value d should be
+	// output to (bitlength(n-1)+7)>>3 octets.
+	n := k.ecPublicKey.Params().N
+	octetLength := (new(big.Int).Sub(n, big.NewInt(1)).BitLen() + 7) >> 3
+	// Create a buffer with the necessary zero-padding.
+	dBuf := make([]byte, octetLength-len(dBytes), octetLength)
+	dBuf = append(dBuf, dBytes...)
+
+	jwk["d"] = joseBase64UrlEncode(dBuf)
+
+	return jwk
+}
+
+// MarshalJSON serializes this Private Key using the JWK JSON serialization format for
+// elliptic curve keys.
+func (k *ecPrivateKey) MarshalJSON() (data []byte, err error) {
+	return json.Marshal(k.toMap())
+}
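The zero-padding idiom above (for x and y earlier, and for r, s, and d here) is worth isolating: `big.Int.Bytes()` strips leading zeros, so the code allocates a zero-filled slice of the missing length with the full target capacity and then appends. A tiny self-contained illustration:

```go
package main

import (
	"fmt"
	"math/big"
)

// pad left-pads the big-endian bytes of v with zeros to exactly size bytes,
// mirroring how the JWK code fixes the width of x, y, r, s, and d.
func pad(v *big.Int, size int) []byte {
	b := v.Bytes() // no leading zeros, may be shorter than size
	buf := make([]byte, size-len(b), size)
	return append(buf, b...)
}

func main() {
	fmt.Printf("%x\n", pad(big.NewInt(0x1234), 8)) // 0000000000001234
}
```

+// PEMBlock serializes this Private Key to DER-encoded PKIX format.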
+func (k *ecPrivateKey) PEMBlock() (*pem.Block, error) {
+	derBytes, err := x509.MarshalECPrivateKey(k.PrivateKey)
+	if err != nil {
+		return nil, fmt.Errorf("unable to serialize EC PrivateKey to DER-encoded PKIX format: %s", err)
+	}
+	k.extended["keyID"] = k.KeyID() // For display purposes.
+	return createPemBlock("EC PRIVATE KEY", derBytes, k.extended)
+}
+
+func ecPrivateKeyFromMap(jwk map[string]interface{}) (*ecPrivateKey, error) {
+	dB64Url, err := stringFromMap(jwk, "d")
+	if err != nil {
+		return nil, fmt.Errorf("JWK EC Private Key: %s", err)
+	}
+
+	// JWK key type (kty) has already been determined to be "EC".
+	// Need to extract the public key information, then extract the private
+	// key value 'd'.
+	publicKey, err := ecPublicKeyFromMap(jwk)
+	if err != nil {
+		return nil, err
+	}
+
+	d, err := parseECPrivateParam(dB64Url, publicKey.Curve)
+	if err != nil {
+		return nil, fmt.Errorf("JWK EC Private Key d-param: %s", err)
+	}
+
+	key := &ecPrivateKey{
+		ecPublicKey: *publicKey,
+		PrivateKey: &ecdsa.PrivateKey{
+			PublicKey: *publicKey.PublicKey,
+			D:         d,
+		},
+	}
+
+	return key, nil
+}
+
+/*
+ * Key Generation Functions.
+ */
+
+func generateECPrivateKey(curve elliptic.Curve) (k *ecPrivateKey, err error) {
+	k = new(ecPrivateKey)
+	k.PrivateKey, err = ecdsa.GenerateKey(curve, rand.Reader)
+	if err != nil {
+		return nil, err
+	}
+
+	k.ecPublicKey.PublicKey = &k.PrivateKey.PublicKey
+	k.extended = make(map[string]interface{})
+
+	return
+}
+
+// GenerateECP256PrivateKey generates a key pair using elliptic curve P-256.
+func GenerateECP256PrivateKey() (PrivateKey, error) {
+	k, err := generateECPrivateKey(elliptic.P256())
+	if err != nil {
+		return nil, fmt.Errorf("error generating EC P-256 key: %s", err)
+	}
+
+	k.curveName = "P-256"
+	k.signatureAlgorithm = es256
+
+	return k, nil
+}
+
+// GenerateECP384PrivateKey generates a key pair using elliptic curve P-384.
+func GenerateECP384PrivateKey() (PrivateKey, error) {
+	k, err := generateECPrivateKey(elliptic.P384())
+	if err != nil {
+		return nil, fmt.Errorf("error generating EC P-384 key: %s", err)
+	}
+
+	k.curveName = "P-384"
+	k.signatureAlgorithm = es384
+
+	return k, nil
+}
+
+// GenerateECP521PrivateKey generates a key pair using elliptic curve P-521.
+func GenerateECP521PrivateKey() (PrivateKey, error) {
+	k, err := generateECPrivateKey(elliptic.P521())
+	if err != nil {
+		return nil, fmt.Errorf("error generating EC P-521 key: %s", err)
+	}
+
+	k.curveName = "P-521"
+	k.signatureAlgorithm = es512
+
+	return k, nil
+}
diff --git a/vendor/github.com/docker/libtrust/filter.go b/vendor/github.com/docker/libtrust/filter.go
new file mode 100644
index 00000000..5b2b4fca
--- /dev/null
+++ b/vendor/github.com/docker/libtrust/filter.go
@@ -0,0 +1,50 @@
+package libtrust
+
+import (
+	"path/filepath"
+)
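A hedged usage sketch for the function defined just below; it assumes `AddExtendedField` is part of the `PublicKey` interface, as the methods in ec_key.go above suggest:

```go
package main

import (
	"fmt"
	"log"

	"github.com/docker/libtrust"
)

func main() {
	k1, err := libtrust.GenerateECP256PrivateKey()
	if err != nil {
		log.Fatal(err)
	}
	k2, err := libtrust.GenerateECP256PrivateKey()
	if err != nil {
		log.Fatal(err)
	}

	// "hosts" is the extended field FilterByHosts matches with
	// filepath.Match patterns; the second key carries no hosts at all.
	pub1 := k1.PublicKey()
	pub1.AddExtendedField("hosts", []string{"*.example.com"})
	pub2 := k2.PublicKey()

	keys := []libtrust.PublicKey{pub1, pub2}

	matched, err := libtrust.FilterByHosts(keys, "registry.example.com", false)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(matched)) // 1: only the key whose pattern matches

	withEmpty, _ := libtrust.FilterByHosts(keys, "registry.example.com", true)
	fmt.Println(len(withEmpty)) // 2: includeEmpty keeps host-less keys too
}
```

+// FilterByHosts filters the list of PublicKeys to only those which contain a
+// 'hosts' pattern which matches the given host. If *includeEmpty* is true,
+// then keys which do not specify any hosts are also returned.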
+func FilterByHosts(keys []PublicKey, host string, includeEmpty bool) ([]PublicKey, error) {
+	filtered := make([]PublicKey, 0, len(keys))
+
+	for _, pubKey := range keys {
+		var hosts []string
+		switch v := pubKey.GetExtendedField("hosts").(type) {
+		case []string:
+			hosts = v
+		case []interface{}:
+			for _, value := range v {
+				h, ok := value.(string)
+				if !ok {
+					continue
+				}
+				hosts = append(hosts, h)
+			}
+		}
+
+		if len(hosts) == 0 {
+			if includeEmpty {
+				filtered = append(filtered, pubKey)
+			}
+			continue
+		}
+
+		// Check if any hosts match pattern
+		for _, hostPattern := range hosts {
+			match, err := filepath.Match(hostPattern, host)
+			if err != nil {
+				return nil, err
+			}
+
+			if match {
+				filtered = append(filtered, pubKey)
+				// Stop after the first match so a key matching several
+				// patterns is only added once.
+				break
+			}
+		}
+	}
+
+	return filtered, nil
+}
diff --git a/vendor/github.com/docker/libtrust/hash.go b/vendor/github.com/docker/libtrust/hash.go
new file mode 100644
index 00000000..a2df787d
--- /dev/null
+++ b/vendor/github.com/docker/libtrust/hash.go
@@ -0,0 +1,56 @@
+package libtrust
+
+import (
+	"crypto"
+	_ "crypto/sha256" // Register SHA224 and SHA256
+	_ "crypto/sha512" // Register SHA384 and SHA512
+	"fmt"
+)
+
+type signatureAlgorithm struct {
+	algHeaderParam string
+	hashID         crypto.Hash
+}
+
+func (h *signatureAlgorithm) HeaderParam() string {
+	return h.algHeaderParam
+}
+
+func (h *signatureAlgorithm) HashID() crypto.Hash {
+	return h.hashID
+}
+
+var (
+	rs256 = &signatureAlgorithm{"RS256", crypto.SHA256}
+	rs384 = &signatureAlgorithm{"RS384", crypto.SHA384}
+	rs512 = &signatureAlgorithm{"RS512", crypto.SHA512}
+	es256 = &signatureAlgorithm{"ES256", crypto.SHA256}
+	es384 = &signatureAlgorithm{"ES384", crypto.SHA384}
+	es512 = &signatureAlgorithm{"ES512", crypto.SHA512}
+)
+
+func rsaSignatureAlgorithmByName(alg string) (*signatureAlgorithm, error) {
+	switch {
+	case alg == "RS256":
+		return rs256, nil
+	case alg == "RS384":
+		return rs384, nil
+	case alg == "RS512":
+		return rs512, nil
+	default:
+		return nil, fmt.Errorf("RSA Digital Signature Algorithm %q not supported", alg)
+	}
+}
+
+func rsaPKCS1v15SignatureAlgorithmForHashID(hashID crypto.Hash) *signatureAlgorithm {
+	switch {
+	case hashID == crypto.SHA512:
+		return rs512
+	case hashID == crypto.SHA384:
+		return rs384
+	case hashID == crypto.SHA256:
+		fallthrough
+	default:
+		return rs256
+	}
+}
diff --git a/vendor/github.com/docker/libtrust/jsonsign.go b/vendor/github.com/docker/libtrust/jsonsign.go
new file mode 100644
index 00000000..cb2ca9a7
--- /dev/null
+++ b/vendor/github.com/docker/libtrust/jsonsign.go
@@ -0,0 +1,657 @@
+package libtrust
+
+import (
+	"bytes"
+	"crypto"
+	"crypto/x509"
+	"encoding/base64"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"sort"
+	"time"
+	"unicode"
+)
+
+var (
+	// ErrInvalidSignContent is used when the content to be signed is invalid.
+	ErrInvalidSignContent = errors.New("invalid sign content")
+
+	// ErrInvalidJSONContent is used when invalid json is encountered.
+	ErrInvalidJSONContent = errors.New("invalid json content")
+
+	// ErrMissingSignatureKey is used when the specified signature key
+	// does not exist in the JSON content.
+	ErrMissingSignatureKey = errors.New("missing signature key")
+)
+
+type jsHeader struct {
+	JWK       PublicKey `json:"jwk,omitempty"`
+	Algorithm string    `json:"alg"`
+	Chain     []string  `json:"x5c,omitempty"`
+}
+
+type jsSignature struct {
+	Header    jsHeader `json:"header"`
+	Signature string   `json:"signature"`
+	Protected string   `json:"protected,omitempty"`
+}
+
+type jsSignaturesSorted []jsSignature
+
+func (jsbkid jsSignaturesSorted) Swap(i, j int) { jsbkid[i], jsbkid[j] = jsbkid[j], jsbkid[i] }
+func (jsbkid jsSignaturesSorted) Len() int      { return len(jsbkid) }
+
+func (jsbkid jsSignaturesSorted) Less(i, j int) bool {
+	ki, kj := jsbkid[i].Header.JWK.KeyID(), jsbkid[j].Header.JWK.KeyID()
+	si, sj := jsbkid[i].Signature, jsbkid[j].Signature
+
+	if ki == kj {
+		return si < sj
+	}
+
+	return ki < kj
+}
+
+type signKey struct {
+	PrivateKey
+	Chain []*x509.Certificate
+}
+
+// JSONSignature represents a signature of a json object.
+type JSONSignature struct {
+	payload      string
+	signatures   []jsSignature
+	indent       string
+	formatLength int
+	formatTail   []byte
+}
+
+func newJSONSignature() *JSONSignature {
+	return &JSONSignature{
+		signatures: make([]jsSignature, 0, 1),
+	}
+}
+
+// Payload returns the encoded payload of the signature. This
+// payload should not be signed directly.
+func (js *JSONSignature) Payload() ([]byte, error) {
+	return joseBase64UrlDecode(js.payload)
+}
+
+func (js *JSONSignature) protectedHeader() (string, error) {
+	protected := map[string]interface{}{
+		"formatLength": js.formatLength,
+		"formatTail":   joseBase64UrlEncode(js.formatTail),
+		"time":         time.Now().UTC().Format(time.RFC3339),
+	}
+	protectedBytes, err := json.Marshal(protected)
+	if err != nil {
+		return "", err
+	}
+
+	return joseBase64UrlEncode(protectedBytes), nil
+}
+
+func (js *JSONSignature) signBytes(protectedHeader string) ([]byte, error) {
+	buf := make([]byte, len(js.payload)+len(protectedHeader)+1)
+	copy(buf, protectedHeader)
+	buf[len(protectedHeader)] = '.'
+	copy(buf[len(protectedHeader)+1:], js.payload)
+	return buf, nil
+}
+
+// Sign adds a signature using the given private key.
+func (js *JSONSignature) Sign(key PrivateKey) error {
+	protected, err := js.protectedHeader()
+	if err != nil {
+		return err
+	}
+	signBytes, err := js.signBytes(protected)
+	if err != nil {
+		return err
+	}
+	sigBytes, algorithm, err := key.Sign(bytes.NewReader(signBytes), crypto.SHA256)
+	if err != nil {
+		return err
+	}
+
+	js.signatures = append(js.signatures, jsSignature{
+		Header: jsHeader{
+			JWK:       key.PublicKey(),
+			Algorithm: algorithm,
+		},
+		Signature: joseBase64UrlEncode(sigBytes),
+		Protected: protected,
+	})
+
+	return nil
+}
+
+// SignWithChain adds a signature using the given private key
+// and setting the x509 chain. The public key of the first element
+// in the chain must be the public key corresponding to the signing key.
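+//
+// A minimal usage sketch (hypothetical caller; key, leafCert and caCert are
+// assumed to have been obtained elsewhere):
+//
+//	js, err := NewJSONSignature(content)
+//	if err != nil {
+//		// handle error
+//	}
+//	err = js.SignWithChain(key, []*x509.Certificate{leafCert, caCert})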
+func (js *JSONSignature) SignWithChain(key PrivateKey, chain []*x509.Certificate) error {
+	// TODO: verify that the public key of chain[0] corresponds to the
+	// signing key before adding the signature.
+	protected, err := js.protectedHeader()
+	if err != nil {
+		return err
+	}
+	signBytes, err := js.signBytes(protected)
+	if err != nil {
+		return err
+	}
+	sigBytes, algorithm, err := key.Sign(bytes.NewReader(signBytes), crypto.SHA256)
+	if err != nil {
+		return err
+	}
+
+	header := jsHeader{
+		Chain:     make([]string, len(chain)),
+		Algorithm: algorithm,
+	}
+
+	for i, cert := range chain {
+		header.Chain[i] = base64.StdEncoding.EncodeToString(cert.Raw)
+	}
+
+	js.signatures = append(js.signatures, jsSignature{
+		Header:    header,
+		Signature: joseBase64UrlEncode(sigBytes),
+		Protected: protected,
+	})
+
+	return nil
+}
+
+// Verify verifies all the signatures and returns the list of
+// public keys used to sign. Any x509 chains are not checked.
+func (js *JSONSignature) Verify() ([]PublicKey, error) {
+	keys := make([]PublicKey, len(js.signatures))
+	for i, signature := range js.signatures {
+		signBytes, err := js.signBytes(signature.Protected)
+		if err != nil {
+			return nil, err
+		}
+		var publicKey PublicKey
+		if len(signature.Header.Chain) > 0 {
+			certBytes, err := base64.StdEncoding.DecodeString(signature.Header.Chain[0])
+			if err != nil {
+				return nil, err
+			}
+			cert, err := x509.ParseCertificate(certBytes)
+			if err != nil {
+				return nil, err
+			}
+			publicKey, err = FromCryptoPublicKey(cert.PublicKey)
+			if err != nil {
+				return nil, err
+			}
+		} else if signature.Header.JWK != nil {
+			publicKey = signature.Header.JWK
+		} else {
+			return nil, errors.New("missing public key")
+		}
+
+		sigBytes, err := joseBase64UrlDecode(signature.Signature)
+		if err != nil {
+			return nil, err
+		}
+
+		err = publicKey.Verify(bytes.NewReader(signBytes), signature.Header.Algorithm, sigBytes)
+		if err != nil {
+			return nil, err
+		}
+
+		keys[i] = publicKey
+	}
+	return keys, nil
+}
+
+// VerifyChains verifies all the signatures and the chains associated
+// with each signature and returns the list of verified chains.
+// Signatures without an x509 chain are not checked.
+func (js *JSONSignature) VerifyChains(ca *x509.CertPool) ([][]*x509.Certificate, error) {
+	chains := make([][]*x509.Certificate, 0, len(js.signatures))
+	for _, signature := range js.signatures {
+		signBytes, err := js.signBytes(signature.Protected)
+		if err != nil {
+			return nil, err
+		}
+		var publicKey PublicKey
+		if len(signature.Header.Chain) > 0 {
+			certBytes, err := base64.StdEncoding.DecodeString(signature.Header.Chain[0])
+			if err != nil {
+				return nil, err
+			}
+			cert, err := x509.ParseCertificate(certBytes)
+			if err != nil {
+				return nil, err
+			}
+			publicKey, err = FromCryptoPublicKey(cert.PublicKey)
+			if err != nil {
+				return nil, err
+			}
+			intermediates := x509.NewCertPool()
+			if len(signature.Header.Chain) > 1 {
+				intermediateChain := signature.Header.Chain[1:]
+				for i := range intermediateChain {
+					certBytes, err := base64.StdEncoding.DecodeString(intermediateChain[i])
+					if err != nil {
+						return nil, err
+					}
+					intermediate, err := x509.ParseCertificate(certBytes)
+					if err != nil {
+						return nil, err
+					}
+					intermediates.AddCert(intermediate)
+				}
+			}
+
+			verifyOptions := x509.VerifyOptions{
+				Intermediates: intermediates,
+				Roots:         ca,
+			}
+
+			verifiedChains, err := cert.Verify(verifyOptions)
+			if err != nil {
+				return nil, err
+			}
+			chains = append(chains, verifiedChains...)
+
+			sigBytes, err := joseBase64UrlDecode(signature.Signature)
+			if err != nil {
+				return nil, err
+			}
+
+			err = publicKey.Verify(bytes.NewReader(signBytes), signature.Header.Algorithm, sigBytes)
+			if err != nil {
+				return nil, err
+			}
+		}
+
+	}
+	return chains, nil
+}
+
+// JWS returns JSON serialized JWS according to
+// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-7.2
+func (js *JSONSignature) JWS() ([]byte, error) {
+	if len(js.signatures) == 0 {
+		return nil, errors.New("missing signature")
+	}
+
+	sort.Sort(jsSignaturesSorted(js.signatures))
+
+	jsonMap := map[string]interface{}{
+		"payload":    js.payload,
+		"signatures": js.signatures,
+	}
+
+	return json.MarshalIndent(jsonMap, "", " ")
+}
+
+func notSpace(r rune) bool {
+	return !unicode.IsSpace(r)
+}
+
+func detectJSONIndent(jsonContent []byte) (indent string) {
+	if len(jsonContent) > 2 && jsonContent[0] == '{' && jsonContent[1] == '\n' {
+		quoteIndex := bytes.IndexRune(jsonContent[1:], '"')
+		if quoteIndex > 0 {
+			indent = string(jsonContent[2 : quoteIndex+1])
+		}
+	}
+	return
+}
+
+type jsParsedHeader struct {
+	JWK       json.RawMessage `json:"jwk"`
+	Algorithm string          `json:"alg"`
+	Chain     []string        `json:"x5c"`
+}
+
+type jsParsedSignature struct {
+	Header    jsParsedHeader `json:"header"`
+	Signature string         `json:"signature"`
+	Protected string         `json:"protected"`
+}
+
+// ParseJWS parses a JWS serialized JSON object into a JSONSignature.
+func ParseJWS(content []byte) (*JSONSignature, error) {
+	type jsParsed struct {
+		Payload    string              `json:"payload"`
+		Signatures []jsParsedSignature `json:"signatures"`
+	}
+	parsed := &jsParsed{}
+	err := json.Unmarshal(content, parsed)
+	if err != nil {
+		return nil, err
+	}
+	if len(parsed.Signatures) == 0 {
+		return nil, errors.New("missing signatures")
+	}
+	payload, err := joseBase64UrlDecode(parsed.Payload)
+	if err != nil {
+		return nil, err
+	}
+
+	js, err := NewJSONSignature(payload)
+	if err != nil {
+		return nil, err
+	}
+	js.signatures = make([]jsSignature, len(parsed.Signatures))
+	for i, signature := range parsed.Signatures {
+		header := jsHeader{
+			Algorithm: signature.Header.Algorithm,
+		}
+		if signature.Header.Chain != nil {
+			header.Chain = signature.Header.Chain
+		}
+		if signature.Header.JWK != nil {
+			publicKey, err := UnmarshalPublicKeyJWK([]byte(signature.Header.JWK))
+			if err != nil {
+				return nil, err
+			}
+			header.JWK = publicKey
+		}
+		js.signatures[i] = jsSignature{
+			Header:    header,
+			Signature: signature.Signature,
+			Protected: signature.Protected,
+		}
+	}
+
+	return js, nil
+}
+
+// NewJSONSignature returns a new unsigned JWS from a json byte array.
+// JSONSignature will need to be signed before serializing or storing.
+// Optionally, one or more signatures can be provided as byte buffers,
+// containing serialized JWS signatures, to assemble a fully signed JWS
+// package. It is the caller's responsibility to ensure uniqueness of the
+// provided signatures.
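+//
+// A minimal usage sketch (hypothetical caller; content is raw JSON and key
+// is a PrivateKey obtained elsewhere):
+//
+//	js, err := NewJSONSignature(content)
+//	if err != nil {
+//		// handle error
+//	}
+//	if err := js.Sign(key); err != nil {
+//		// handle error
+//	}
+//	jws, err := js.JWS()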
+func NewJSONSignature(content []byte, signatures ...[]byte) (*JSONSignature, error) {
+	var dataMap map[string]interface{}
+	err := json.Unmarshal(content, &dataMap)
+	if err != nil {
+		return nil, err
+	}
+
+	js := newJSONSignature()
+	js.indent = detectJSONIndent(content)
+
+	js.payload = joseBase64UrlEncode(content)
+
+	// Find trailing } and whitespace, put in protected header
+	closeIndex := bytes.LastIndexFunc(content, notSpace)
+	if content[closeIndex] != '}' {
+		return nil, ErrInvalidJSONContent
+	}
+	lastRuneIndex := bytes.LastIndexFunc(content[:closeIndex], notSpace)
+	if content[lastRuneIndex] == ',' {
+		return nil, ErrInvalidJSONContent
+	}
+	js.formatLength = lastRuneIndex + 1
+	js.formatTail = content[js.formatLength:]
+
+	if len(signatures) > 0 {
+		for _, signature := range signatures {
+			var parsedJSig jsParsedSignature
+
+			if err := json.Unmarshal(signature, &parsedJSig); err != nil {
+				return nil, err
+			}
+
+			// TODO(stevvooe): A lot of the code below is repeated in
+			// ParseJWS. It will require more refactoring to fix that.
+			jsig := jsSignature{
+				Header: jsHeader{
+					Algorithm: parsedJSig.Header.Algorithm,
+				},
+				Signature: parsedJSig.Signature,
+				Protected: parsedJSig.Protected,
+			}
+
+			if parsedJSig.Header.Chain != nil {
+				jsig.Header.Chain = parsedJSig.Header.Chain
+			}
+
+			if parsedJSig.Header.JWK != nil {
+				publicKey, err := UnmarshalPublicKeyJWK([]byte(parsedJSig.Header.JWK))
+				if err != nil {
+					return nil, err
+				}
+				jsig.Header.JWK = publicKey
+			}
+
+			js.signatures = append(js.signatures, jsig)
+		}
+	}
+
+	return js, nil
+}
+
+// NewJSONSignatureFromMap returns a new unsigned JSONSignature from a map or
+// struct. JWS will need to be signed before serializing or storing.
+func NewJSONSignatureFromMap(content interface{}) (*JSONSignature, error) {
+	switch content.(type) {
+	case map[string]interface{}:
+	case struct{}:
+	default:
+		return nil, errors.New("invalid data type")
+	}
+
+	js := newJSONSignature()
+	js.indent = " "
+
+	payload, err := json.MarshalIndent(content, "", js.indent)
+	if err != nil {
+		return nil, err
+	}
+	js.payload = joseBase64UrlEncode(payload)
+
+	// Remove '\n}' from formatted section, put in protected header
+	js.formatLength = len(payload) - 2
+	js.formatTail = payload[js.formatLength:]
+
+	return js, nil
+}
+
+func readIntFromMap(key string, m map[string]interface{}) (int, bool) {
+	value, ok := m[key]
+	if !ok {
+		return 0, false
+	}
+	switch v := value.(type) {
+	case int:
+		return v, true
+	case float64:
+		return int(v), true
+	default:
+		return 0, false
+	}
+}
+
+func readStringFromMap(key string, m map[string]interface{}) (v string, ok bool) {
+	value, ok := m[key]
+	if !ok {
+		return "", false
+	}
+	v, ok = value.(string)
+	return
+}
+
+// ParsePrettySignature parses a formatted signature into a
+// JSON signature. If the signatures are missing the format information
+// an error is returned. The formatted signature must have been created by
+// the PrettySignature method.
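+//
+// A minimal usage sketch (hypothetical caller; signedManifest is JSON that
+// was produced by PrettySignature with the "signatures" key):
+//
+//	js, err := ParsePrettySignature(signedManifest, "signatures")
+//	if err != nil {
+//		// handle error
+//	}
+//	keys, err := js.Verify()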
+func ParsePrettySignature(content []byte, signatureKey string) (*JSONSignature, error) {
+	var contentMap map[string]json.RawMessage
+	err := json.Unmarshal(content, &contentMap)
+	if err != nil {
+		return nil, fmt.Errorf("error unmarshalling content: %s", err)
+	}
+	sigMessage, ok := contentMap[signatureKey]
+	if !ok {
+		return nil, ErrMissingSignatureKey
+	}
+
+	var signatureBlocks []jsParsedSignature
+	err = json.Unmarshal([]byte(sigMessage), &signatureBlocks)
+	if err != nil {
+		return nil, fmt.Errorf("error unmarshalling signatures: %s", err)
+	}
+
+	js := newJSONSignature()
+	js.signatures = make([]jsSignature, len(signatureBlocks))
+
+	for i, signatureBlock := range signatureBlocks {
+		protectedBytes, err := joseBase64UrlDecode(signatureBlock.Protected)
+		if err != nil {
+			return nil, fmt.Errorf("base64 decode error: %s", err)
+		}
+		var protectedHeader map[string]interface{}
+		err = json.Unmarshal(protectedBytes, &protectedHeader)
+		if err != nil {
+			return nil, fmt.Errorf("error unmarshalling protected header: %s", err)
+		}
+
+		formatLength, ok := readIntFromMap("formatLength", protectedHeader)
+		if !ok {
+			return nil, errors.New("missing formatted length")
+		}
+		encodedTail, ok := readStringFromMap("formatTail", protectedHeader)
+		if !ok {
+			return nil, errors.New("missing formatted tail")
+		}
+		formatTail, err := joseBase64UrlDecode(encodedTail)
+		if err != nil {
+			return nil, fmt.Errorf("base64 decode error on tail: %s", err)
+		}
+		if js.formatLength == 0 {
+			js.formatLength = formatLength
+		} else if js.formatLength != formatLength {
+			return nil, errors.New("conflicting format length")
+		}
+		if len(js.formatTail) == 0 {
+			js.formatTail = formatTail
+		} else if !bytes.Equal(js.formatTail, formatTail) {
+			return nil, errors.New("conflicting format tail")
+		}
+
+		header := jsHeader{
+			Algorithm: signatureBlock.Header.Algorithm,
+			Chain:     signatureBlock.Header.Chain,
+		}
+		if signatureBlock.Header.JWK != nil {
+			publicKey, err := UnmarshalPublicKeyJWK([]byte(signatureBlock.Header.JWK))
+			if err != nil {
+				return nil, fmt.Errorf("error unmarshalling public key: %s", err)
+			}
+			header.JWK = publicKey
+		}
+		js.signatures[i] = jsSignature{
+			Header:    header,
+			Signature: signatureBlock.Signature,
+			Protected: signatureBlock.Protected,
+		}
+	}
+	if js.formatLength > len(content) {
+		return nil, errors.New("invalid format length")
+	}
+	formatted := make([]byte, js.formatLength+len(js.formatTail))
+	copy(formatted, content[:js.formatLength])
+	copy(formatted[js.formatLength:], js.formatTail)
+	js.indent = detectJSONIndent(formatted)
+	js.payload = joseBase64UrlEncode(formatted)
+
+	return js, nil
+}
+
+// PrettySignature formats a json signature into an easy-to-read,
+// single json serialized object.
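+//
+// A minimal usage sketch (continuing from a signed JSONSignature js; the
+// key name "signatures" is the conventional choice):
+//
+//	pretty, err := js.PrettySignature("signatures")
+//	if err != nil {
+//		// handle error
+//	}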
+func (js *JSONSignature) PrettySignature(signatureKey string) ([]byte, error) {
+	if len(js.signatures) == 0 {
+		return nil, errors.New("no signatures")
+	}
+	payload, err := joseBase64UrlDecode(js.payload)
+	if err != nil {
+		return nil, err
+	}
+	payload = payload[:js.formatLength]
+
+	sort.Sort(jsSignaturesSorted(js.signatures))
+
+	var marshalled []byte
+	var marshallErr error
+	if js.indent != "" {
+		marshalled, marshallErr = json.MarshalIndent(js.signatures, js.indent, js.indent)
+	} else {
+		marshalled, marshallErr = json.Marshal(js.signatures)
+	}
+	if marshallErr != nil {
+		return nil, marshallErr
+	}
+
+	buf := bytes.NewBuffer(make([]byte, 0, len(payload)+len(marshalled)+34))
+	buf.Write(payload)
+	buf.WriteByte(',')
+	if js.indent != "" {
+		buf.WriteByte('\n')
+		buf.WriteString(js.indent)
+		buf.WriteByte('"')
+		buf.WriteString(signatureKey)
+		buf.WriteString("\": ")
+		buf.Write(marshalled)
+		buf.WriteByte('\n')
+	} else {
+		buf.WriteByte('"')
+		buf.WriteString(signatureKey)
+		buf.WriteString("\":")
+		buf.Write(marshalled)
+	}
+	buf.WriteByte('}')
+
+	return buf.Bytes(), nil
+}
+
+// Signatures provides the signatures on this JWS as opaque blobs, sorted by
+// keyID. These blobs can be stored and reassembled with payloads. Internally,
+// they are simply marshaled json web signatures but implementations should
+// not rely on this.
+func (js *JSONSignature) Signatures() ([][]byte, error) {
+	sort.Sort(jsSignaturesSorted(js.signatures))
+
+	var sb [][]byte
+	for _, jsig := range js.signatures {
+		p, err := json.Marshal(jsig)
+		if err != nil {
+			return nil, err
+		}
+
+		sb = append(sb, p)
+	}
+
+	return sb, nil
+}
+
+// Merge combines the signatures from one or more other signatures into the
+// method receiver. If the payloads differ for any argument, an error will be
+// returned and the receiver will not be modified.
+func (js *JSONSignature) Merge(others ...*JSONSignature) error {
+	merged := js.signatures
+	for _, other := range others {
+		if js.payload != other.payload {
+			return fmt.Errorf("payloads differ from merge target")
+		}
+		merged = append(merged, other.signatures...)
+	}
+
+	js.signatures = merged
+	return nil
+}
diff --git a/vendor/github.com/docker/libtrust/key.go b/vendor/github.com/docker/libtrust/key.go
new file mode 100644
index 00000000..73642db2
--- /dev/null
+++ b/vendor/github.com/docker/libtrust/key.go
@@ -0,0 +1,253 @@
+package libtrust
+
+import (
+	"crypto"
+	"crypto/ecdsa"
+	"crypto/rsa"
+	"crypto/x509"
+	"encoding/json"
+	"encoding/pem"
+	"errors"
+	"fmt"
+	"io"
+)
+
+// PublicKey is a generic interface for a Public Key.
+type PublicKey interface {
+	// KeyType returns the key type for this key. For elliptic curve keys,
+	// this value should be "EC". For RSA keys, this value should be "RSA".
+	KeyType() string
+	// KeyID returns a distinct identifier which is unique to this Public Key.
+	// The format generated by this library is a base32 encoding of a 240 bit
+	// hash of the public key data divided into 12 groups like so:
+	// ABCD:EFGH:IJKL:MNOP:QRST:UVWX:YZ23:4567:ABCD:EFGH:IJKL:MNOP
+	KeyID() string
+	// Verify verifies the signature of the data in the io.Reader using this
+	// Public Key. The alg parameter should identify the digital signature
+	// algorithm which was used to produce the signature and should be
+	// supported by this public key. Returns a nil error if the signature
+	// is valid.
+	Verify(data io.Reader, alg string, signature []byte) error
+	// CryptoPublicKey returns the internal object which can be used as a
+	// crypto.PublicKey for use with other standard library operations. The type
+	// is either *rsa.PublicKey or *ecdsa.PublicKey
+	CryptoPublicKey() crypto.PublicKey
+	// These public keys can be serialized to the standard JSON encoding for
+	// JSON Web Keys. See section 6 of the IETF draft RFC for JOSE JSON Web
+	// Algorithms.
+	MarshalJSON() ([]byte, error)
+	// These keys can also be serialized to the standard PEM encoding.
+	PEMBlock() (*pem.Block, error)
+	// The string representation of a key is its key type and ID.
+	String() string
+	AddExtendedField(string, interface{})
+	GetExtendedField(string) interface{}
+}
+
+// PrivateKey is a generic interface for a Private Key.
+type PrivateKey interface {
+	// A PrivateKey contains all fields and methods of a PublicKey of the
+	// same type. The MarshalJSON method also outputs the private key as a
+	// JSON Web Key, and the PEMBlock method outputs the private key as a
+	// PEM block.
+	PublicKey
+	// PublicKey returns the PublicKey associated with this PrivateKey.
+	PublicKey() PublicKey
+	// Sign signs the data read from the io.Reader using a signature algorithm
+	// supported by the private key. If the specified hashing algorithm is
+	// supported by this key, that hash function is used to generate the
+	// signature; otherwise the default hashing algorithm for this key is
+	// used. Returns the signature and identifier of the algorithm used.
+	Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error)
+	// CryptoPrivateKey returns the internal object which can be used as a
+	// crypto.PrivateKey for use with other standard library operations. The
+	// type is either *rsa.PrivateKey or *ecdsa.PrivateKey.
+	CryptoPrivateKey() crypto.PrivateKey
+}
+
+// FromCryptoPublicKey returns a libtrust PublicKey representation of the given
+// *ecdsa.PublicKey or *rsa.PublicKey. Returns a non-nil error when the given
+// key is of an unsupported type.
+func FromCryptoPublicKey(cryptoPublicKey crypto.PublicKey) (PublicKey, error) {
+	switch cryptoPublicKey := cryptoPublicKey.(type) {
+	case *ecdsa.PublicKey:
+		return fromECPublicKey(cryptoPublicKey)
+	case *rsa.PublicKey:
+		return fromRSAPublicKey(cryptoPublicKey), nil
+	default:
+		return nil, fmt.Errorf("public key type %T is not supported", cryptoPublicKey)
+	}
+}
+
+// FromCryptoPrivateKey returns a libtrust PrivateKey representation of the given
+// *ecdsa.PrivateKey or *rsa.PrivateKey. Returns a non-nil error when the given
+// key is of an unsupported type.
+func FromCryptoPrivateKey(cryptoPrivateKey crypto.PrivateKey) (PrivateKey, error) {
+	switch cryptoPrivateKey := cryptoPrivateKey.(type) {
+	case *ecdsa.PrivateKey:
+		return fromECPrivateKey(cryptoPrivateKey)
+	case *rsa.PrivateKey:
+		return fromRSAPrivateKey(cryptoPrivateKey), nil
+	default:
+		return nil, fmt.Errorf("private key type %T is not supported", cryptoPrivateKey)
+	}
+}
+
+// UnmarshalPublicKeyPEM parses the PEM encoded data and returns a libtrust
+// PublicKey or an error if there is a problem with the encoding.
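+//
+// A minimal usage sketch (hypothetical caller; pemBytes holds a PEM
+// "PUBLIC KEY" block read from disk or the wire):
+//
+//	pubKey, err := UnmarshalPublicKeyPEM(pemBytes)
+//	if err != nil {
+//		// handle error
+//	}
+//	fmt.Println(pubKey.KeyID())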
+func UnmarshalPublicKeyPEM(data []byte) (PublicKey, error) {
+	pemBlock, _ := pem.Decode(data)
+	if pemBlock == nil {
+		return nil, errors.New("unable to find PEM encoded data")
+	} else if pemBlock.Type != "PUBLIC KEY" {
+		return nil, fmt.Errorf("unable to get PublicKey from PEM type: %s", pemBlock.Type)
+	}
+
+	return pubKeyFromPEMBlock(pemBlock)
+}
+
+// UnmarshalPublicKeyPEMBundle parses the PEM encoded data as a bundle of
+// PEM blocks appended one after the other and returns a slice of PublicKey
+// objects that it finds.
+func UnmarshalPublicKeyPEMBundle(data []byte) ([]PublicKey, error) {
+	pubKeys := []PublicKey{}
+
+	for {
+		var pemBlock *pem.Block
+		pemBlock, data = pem.Decode(data)
+		if pemBlock == nil {
+			break
+		} else if pemBlock.Type != "PUBLIC KEY" {
+			return nil, fmt.Errorf("unable to get PublicKey from PEM type: %s", pemBlock.Type)
+		}
+
+		pubKey, err := pubKeyFromPEMBlock(pemBlock)
+		if err != nil {
+			return nil, err
+		}
+
+		pubKeys = append(pubKeys, pubKey)
+	}
+
+	return pubKeys, nil
+}
+
+// UnmarshalPrivateKeyPEM parses the PEM encoded data and returns a libtrust
+// PrivateKey or an error if there is a problem with the encoding.
+func UnmarshalPrivateKeyPEM(data []byte) (PrivateKey, error) {
+	pemBlock, _ := pem.Decode(data)
+	if pemBlock == nil {
+		return nil, errors.New("unable to find PEM encoded data")
+	}
+
+	var key PrivateKey
+
+	switch {
+	case pemBlock.Type == "RSA PRIVATE KEY":
+		rsaPrivateKey, err := x509.ParsePKCS1PrivateKey(pemBlock.Bytes)
+		if err != nil {
+			return nil, fmt.Errorf("unable to decode RSA Private Key PEM data: %s", err)
+		}
+		key = fromRSAPrivateKey(rsaPrivateKey)
+	case pemBlock.Type == "EC PRIVATE KEY":
+		ecPrivateKey, err := x509.ParseECPrivateKey(pemBlock.Bytes)
+		if err != nil {
+			return nil, fmt.Errorf("unable to decode EC Private Key PEM data: %s", err)
+		}
+		key, err = fromECPrivateKey(ecPrivateKey)
+		if err != nil {
+			return nil, err
+		}
+	default:
+		return nil, fmt.Errorf("unable to get PrivateKey from PEM type: %s", pemBlock.Type)
+	}
+
+	addPEMHeadersToKey(pemBlock, key.PublicKey())
+
+	return key, nil
+}
+
+// UnmarshalPublicKeyJWK unmarshals the given JSON Web Key into a generic
+// Public Key to be used with libtrust.
+func UnmarshalPublicKeyJWK(data []byte) (PublicKey, error) {
+	jwk := make(map[string]interface{})
+
+	err := json.Unmarshal(data, &jwk)
+	if err != nil {
+		return nil, fmt.Errorf(
+			"decoding JWK Public Key JSON data: %s", err,
+		)
+	}
+
+	// Get the Key Type value.
+	kty, err := stringFromMap(jwk, "kty")
+	if err != nil {
+		return nil, fmt.Errorf("JWK Public Key type: %s", err)
+	}
+
+	switch {
+	case kty == "EC":
+		// Call out to unmarshal EC public key.
+		return ecPublicKeyFromMap(jwk)
+	case kty == "RSA":
+		// Call out to unmarshal RSA public key.
+		return rsaPublicKeyFromMap(jwk)
+	default:
+		return nil, fmt.Errorf(
+			"JWK Public Key type not supported: %q", kty,
+		)
+	}
+}
+
+// UnmarshalPublicKeyJWKSet parses the JSON encoded data as a JSON Web Key Set
+// and returns a slice of Public Key objects.
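+//
+// A minimal usage sketch (hypothetical caller; jwkSetBytes is a JSON
+// document of the form {"keys": [...]}):
+//
+//	pubKeys, err := UnmarshalPublicKeyJWKSet(jwkSetBytes)
+//	if err != nil {
+//		// handle error
+//	}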
+func UnmarshalPublicKeyJWKSet(data []byte) ([]PublicKey, error) {
+	rawKeys, err := loadJSONKeySetRaw(data)
+	if err != nil {
+		return nil, err
+	}
+
+	pubKeys := make([]PublicKey, 0, len(rawKeys))
+
+	for _, rawKey := range rawKeys {
+		pubKey, err := UnmarshalPublicKeyJWK(rawKey)
+		if err != nil {
+			return nil, err
+		}
+		pubKeys = append(pubKeys, pubKey)
+	}
+
+	return pubKeys, nil
+}
+
+// UnmarshalPrivateKeyJWK unmarshals the given JSON Web Key into a generic
+// Private Key to be used with libtrust.
+func UnmarshalPrivateKeyJWK(data []byte) (PrivateKey, error) {
+	jwk := make(map[string]interface{})
+
+	err := json.Unmarshal(data, &jwk)
+	if err != nil {
+		return nil, fmt.Errorf(
+			"decoding JWK Private Key JSON data: %s", err,
+		)
+	}
+
+	// Get the Key Type value.
+	kty, err := stringFromMap(jwk, "kty")
+	if err != nil {
+		return nil, fmt.Errorf("JWK Private Key type: %s", err)
+	}
+
+	switch {
+	case kty == "EC":
+		// Call out to unmarshal EC private key.
+		return ecPrivateKeyFromMap(jwk)
+	case kty == "RSA":
+		// Call out to unmarshal RSA private key.
+		return rsaPrivateKeyFromMap(jwk)
+	default:
+		return nil, fmt.Errorf(
+			"JWK Private Key type not supported: %q", kty,
+		)
+	}
+}
diff --git a/vendor/github.com/docker/libtrust/key_files.go b/vendor/github.com/docker/libtrust/key_files.go
new file mode 100644
index 00000000..c526de54
--- /dev/null
+++ b/vendor/github.com/docker/libtrust/key_files.go
@@ -0,0 +1,255 @@
+package libtrust
+
+import (
+	"encoding/json"
+	"encoding/pem"
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"strings"
+)
+
+var (
+	// ErrKeyFileDoesNotExist indicates that the private key file does not exist.
+	ErrKeyFileDoesNotExist = errors.New("key file does not exist")
+)
+
+func readKeyFileBytes(filename string) ([]byte, error) {
+	data, err := ioutil.ReadFile(filename)
+	if err != nil {
+		if os.IsNotExist(err) {
+			err = ErrKeyFileDoesNotExist
+		} else {
+			err = fmt.Errorf("unable to read key file %s: %s", filename, err)
+		}
+
+		return nil, err
+	}
+
+	return data, nil
+}
+
+/*
+	Loading and Saving of Public and Private Keys in either PEM or JWK format.
+*/
+
+// LoadKeyFile opens the given filename and attempts to read a Private Key
+// encoded in either PEM or JWK format (if .json or .jwk file extension).
+func LoadKeyFile(filename string) (PrivateKey, error) {
+	contents, err := readKeyFileBytes(filename)
+	if err != nil {
+		return nil, err
+	}
+
+	var key PrivateKey
+
+	if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") {
+		key, err = UnmarshalPrivateKeyJWK(contents)
+		if err != nil {
+			return nil, fmt.Errorf("unable to decode private key JWK: %s", err)
+		}
+	} else {
+		key, err = UnmarshalPrivateKeyPEM(contents)
+		if err != nil {
+			return nil, fmt.Errorf("unable to decode private key PEM: %s", err)
+		}
+	}
+
+	return key, nil
+}
+
+// LoadPublicKeyFile opens the given filename and attempts to read a Public Key
+// encoded in either PEM or JWK format (if .json or .jwk file extension).
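+//
+// A minimal usage sketch (the file name is hypothetical; any suffix other
+// than .json or .jwk selects the PEM decoding path):
+//
+//	pubKey, err := LoadPublicKeyFile("trusted-key.pem")
+//	if err != nil {
+//		// handle error
+//	}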
+func LoadPublicKeyFile(filename string) (PublicKey, error) {
+	contents, err := readKeyFileBytes(filename)
+	if err != nil {
+		return nil, err
+	}
+
+	var key PublicKey
+
+	if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") {
+		key, err = UnmarshalPublicKeyJWK(contents)
+		if err != nil {
+			return nil, fmt.Errorf("unable to decode public key JWK: %s", err)
+		}
+	} else {
+		key, err = UnmarshalPublicKeyPEM(contents)
+		if err != nil {
+			return nil, fmt.Errorf("unable to decode public key PEM: %s", err)
+		}
+	}
+
+	return key, nil
+}
+
+// SaveKey saves the given key to a file using the provided filename.
+// This process will overwrite any existing file at the provided location.
+func SaveKey(filename string, key PrivateKey) error {
+	var encodedKey []byte
+	var err error
+
+	if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") {
+		// Encode in JSON Web Key format.
+		encodedKey, err = json.MarshalIndent(key, "", " ")
+		if err != nil {
+			return fmt.Errorf("unable to encode private key JWK: %s", err)
+		}
+	} else {
+		// Encode in PEM format.
+		pemBlock, err := key.PEMBlock()
+		if err != nil {
+			return fmt.Errorf("unable to encode private key PEM: %s", err)
+		}
+		encodedKey = pem.EncodeToMemory(pemBlock)
+	}
+
+	err = ioutil.WriteFile(filename, encodedKey, os.FileMode(0600))
+	if err != nil {
+		return fmt.Errorf("unable to write private key file %s: %s", filename, err)
+	}
+
+	return nil
+}
+
+// SavePublicKey saves the given public key to the file.
+func SavePublicKey(filename string, key PublicKey) error {
+	var encodedKey []byte
+	var err error
+
+	if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") {
+		// Encode in JSON Web Key format.
+		encodedKey, err = json.MarshalIndent(key, "", " ")
+		if err != nil {
+			return fmt.Errorf("unable to encode public key JWK: %s", err)
+		}
+	} else {
+		// Encode in PEM format.
+		pemBlock, err := key.PEMBlock()
+		if err != nil {
+			return fmt.Errorf("unable to encode public key PEM: %s", err)
+		}
+		encodedKey = pem.EncodeToMemory(pemBlock)
+	}
+
+	err = ioutil.WriteFile(filename, encodedKey, os.FileMode(0644))
+	if err != nil {
+		return fmt.Errorf("unable to write public key file %s: %s", filename, err)
+	}
+
+	return nil
+}
+
+// Public Key Set files
+
+type jwkSet struct {
+	Keys []json.RawMessage `json:"keys"`
+}
+
+// LoadKeySetFile loads a key set
+func LoadKeySetFile(filename string) ([]PublicKey, error) {
+	if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") {
+		return loadJSONKeySetFile(filename)
+	}
+
+	// Must be a PEM format file
+	return loadPEMKeySetFile(filename)
+}
+
+func loadJSONKeySetRaw(data []byte) ([]json.RawMessage, error) {
+	if len(data) == 0 {
+		// This is okay, just return an empty slice.
+		return []json.RawMessage{}, nil
+	}
+
+	keySet := jwkSet{}
+
+	err := json.Unmarshal(data, &keySet)
+	if err != nil {
+		return nil, fmt.Errorf("unable to decode JSON Web Key Set: %s", err)
+	}
+
+	return keySet.Keys, nil
+}
+
+func loadJSONKeySetFile(filename string) ([]PublicKey, error) {
+	contents, err := readKeyFileBytes(filename)
+	if err != nil && err != ErrKeyFileDoesNotExist {
+		return nil, err
+	}
+
+	return UnmarshalPublicKeyJWKSet(contents)
+}
+
+func loadPEMKeySetFile(filename string) ([]PublicKey, error) {
+	data, err := readKeyFileBytes(filename)
+	if err != nil && err != ErrKeyFileDoesNotExist {
+		return nil, err
+	}
+
+	return UnmarshalPublicKeyPEMBundle(data)
+}
+
+// AddKeySetFile adds a key to a key set
+func AddKeySetFile(filename string, key PublicKey) error {
+	if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") {
+		return addKeySetJSONFile(filename, key)
+	}
+
+	// Must be a PEM format file
+	return addKeySetPEMFile(filename, key)
+}
+
+func addKeySetJSONFile(filename string, key PublicKey) error {
+	encodedKey, err := json.Marshal(key)
+	if err != nil {
+		return fmt.Errorf("unable to encode trusted client key: %s", err)
+	}
+
+	contents, err := readKeyFileBytes(filename)
+	if err != nil && err != ErrKeyFileDoesNotExist {
+		return err
+	}
+
+	rawEntries, err := loadJSONKeySetRaw(contents)
+	if err != nil {
+		return err
+	}
+
+	rawEntries = append(rawEntries, json.RawMessage(encodedKey))
+	entriesWrapper := jwkSet{Keys: rawEntries}
+
+	encodedEntries, err := json.MarshalIndent(entriesWrapper, "", " ")
+	if err != nil {
+		return fmt.Errorf("unable to encode trusted client keys: %s", err)
+	}
+
+	err = ioutil.WriteFile(filename, encodedEntries, os.FileMode(0644))
+	if err != nil {
+		return fmt.Errorf("unable to write trusted client keys file %s: %s", filename, err)
+	}
+
+	return nil
+}
+
+func addKeySetPEMFile(filename string, key PublicKey) error {
+	// Encode to PEM, open file for appending, write PEM.
+	file, err := os.OpenFile(filename, os.O_CREATE|os.O_APPEND|os.O_RDWR, os.FileMode(0644))
+	if err != nil {
+		return fmt.Errorf("unable to open trusted client keys file %s: %s", filename, err)
+	}
+	defer file.Close()
+
+	pemBlock, err := key.PEMBlock()
+	if err != nil {
+		return fmt.Errorf("unable to encode trusted key: %s", err)
+	}
+
+	_, err = file.Write(pem.EncodeToMemory(pemBlock))
+	if err != nil {
+		return fmt.Errorf("unable to write trusted keys file: %s", err)
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/docker/libtrust/key_manager.go b/vendor/github.com/docker/libtrust/key_manager.go
new file mode 100644
index 00000000..9a98ae35
--- /dev/null
+++ b/vendor/github.com/docker/libtrust/key_manager.go
@@ -0,0 +1,175 @@
+package libtrust
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"fmt"
+	"io/ioutil"
+	"net"
+	"os"
+	"path"
+	"sync"
+)
+
+// ClientKeyManager manages client keys on the filesystem
+type ClientKeyManager struct {
+	key        PrivateKey
+	clientFile string
+	clientDir  string
+
+	clientLock sync.RWMutex
+	clients    []PublicKey
+
+	configLock sync.Mutex
+	configs    []*tls.Config
+}
+
+// NewClientKeyManager loads a new manager from a set of key files,
+// managed by the given private key.
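+//
+// A minimal usage sketch (hypothetical caller; trustKey is a PrivateKey and
+// the paths are illustrative):
+//
+//	m, err := NewClientKeyManager(trustKey, "/etc/trust/clients.json", "/etc/trust/clients.d")
+//	if err != nil {
+//		// handle error
+//	}
+//	err = m.RegisterTLSConfig(serverTLSConfig)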
+func NewClientKeyManager(trustKey PrivateKey, clientFile, clientDir string) (*ClientKeyManager, error) {
+	m := &ClientKeyManager{
+		key:        trustKey,
+		clientFile: clientFile,
+		clientDir:  clientDir,
+	}
+	if err := m.loadKeys(); err != nil {
+		return nil, err
+	}
+	// TODO Start watching file and directory
+
+	return m, nil
+}
+
+func (c *ClientKeyManager) loadKeys() (err error) {
+	// Load authorized keys file
+	var clients []PublicKey
+	if c.clientFile != "" {
+		clients, err = LoadKeySetFile(c.clientFile)
+		if err != nil {
+			return fmt.Errorf("unable to load authorized keys: %s", err)
+		}
+	}
+
+	// Add clients from authorized keys directory
+	files, err := ioutil.ReadDir(c.clientDir)
+	if err != nil && !os.IsNotExist(err) {
+		return fmt.Errorf("unable to open authorized keys directory: %s", err)
+	}
+	for _, f := range files {
+		if !f.IsDir() {
+			publicKey, err := LoadPublicKeyFile(path.Join(c.clientDir, f.Name()))
+			if err != nil {
+				return fmt.Errorf("unable to load authorized key file: %s", err)
+			}
+			clients = append(clients, publicKey)
+		}
+	}
+
+	c.clientLock.Lock()
+	c.clients = clients
+	c.clientLock.Unlock()
+
+	return nil
+}
+
+// RegisterTLSConfig registers a tls configuration with the manager
+// such that any changes to the keys may be reflected in
+// the tls client CA pool
+func (c *ClientKeyManager) RegisterTLSConfig(tlsConfig *tls.Config) error {
+	c.clientLock.RLock()
+	certPool, err := GenerateCACertPool(c.key, c.clients)
+	// Release the read lock before the error return below so a failure
+	// cannot leak the lock.
+	c.clientLock.RUnlock()
+	if err != nil {
+		return fmt.Errorf("CA pool generation error: %s", err)
+	}
+
+	tlsConfig.ClientCAs = certPool
+
+	c.configLock.Lock()
+	c.configs = append(c.configs, tlsConfig)
+	c.configLock.Unlock()
+
+	return nil
+}
+
+// NewIdentityAuthTLSConfig creates a tls.Config for the server to use for
+// libtrust identity authentication for the domain specified
+func NewIdentityAuthTLSConfig(trustKey PrivateKey, clients *ClientKeyManager, addr string, domain string) (*tls.Config, error) {
+	tlsConfig := newTLSConfig()
+
+	tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert
+	if err := clients.RegisterTLSConfig(tlsConfig); err != nil {
+		return nil, err
+	}
+
+	// Generate cert
+	ips, domains, err := parseAddr(addr)
+	if err != nil {
+		return nil, err
+	}
+	// add domain that it expects clients to use
+	domains = append(domains, domain)
+	x509Cert, err := GenerateSelfSignedServerCert(trustKey, domains, ips)
+	if err != nil {
+		return nil, fmt.Errorf("certificate generation error: %s", err)
+	}
+	tlsConfig.Certificates = []tls.Certificate{{
+		Certificate: [][]byte{x509Cert.Raw},
+		PrivateKey:  trustKey.CryptoPrivateKey(),
+		Leaf:        x509Cert,
+	}}
+
+	return tlsConfig, nil
+}
+
+// NewCertAuthTLSConfig creates a tls.Config for the server to use for
+// certificate authentication
+func NewCertAuthTLSConfig(caPath, certPath, keyPath string) (*tls.Config, error) {
+	tlsConfig := newTLSConfig()
+
+	cert, err := tls.LoadX509KeyPair(certPath, keyPath)
+	if err != nil {
+		return nil, fmt.Errorf("Couldn't load X509 key pair (%s, %s): %s. Key encrypted?", certPath, keyPath, err)
+	}
+	tlsConfig.Certificates = []tls.Certificate{cert}
+
+	// Verify client certificates against a CA?
+	if caPath != "" {
+		certPool := x509.NewCertPool()
+		file, err := ioutil.ReadFile(caPath)
+		if err != nil {
+			return nil, fmt.Errorf("Couldn't read CA certificate: %s", err)
+		}
+		certPool.AppendCertsFromPEM(file)
+
+		tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert
+		tlsConfig.ClientCAs = certPool
+	}
+
+	return tlsConfig, nil
+}
+
+func newTLSConfig() *tls.Config {
+	return &tls.Config{
+		NextProtos: []string{"http/1.1"},
+		// Avoid fallback on insecure SSL protocols
+		MinVersion: tls.VersionTLS10,
+	}
+}
+
+// parseAddr parses an address into an array of IPs and domains
+func parseAddr(addr string) ([]net.IP, []string, error) {
+	host, _, err := net.SplitHostPort(addr)
+	if err != nil {
+		return nil, nil, err
+	}
+	var domains []string
+	var ips []net.IP
+	ip := net.ParseIP(host)
+	if ip != nil {
+		ips = []net.IP{ip}
+	} else {
+		domains = []string{host}
+	}
+	return ips, domains, nil
+}
diff --git a/vendor/github.com/docker/libtrust/rsa_key.go b/vendor/github.com/docker/libtrust/rsa_key.go
new file mode 100644
index 00000000..dac4cacf
--- /dev/null
+++ b/vendor/github.com/docker/libtrust/rsa_key.go
@@ -0,0 +1,427 @@
+package libtrust
+
+import (
+	"crypto"
+	"crypto/rand"
+	"crypto/rsa"
+	"crypto/x509"
+	"encoding/json"
+	"encoding/pem"
+	"errors"
+	"fmt"
+	"io"
+	"math/big"
+)
+
+/*
+ * RSA DSA PUBLIC KEY
+ */
+
+// rsaPublicKey implements a JWK Public Key using RSA digital signature algorithms.
+type rsaPublicKey struct {
+	*rsa.PublicKey
+	extended map[string]interface{}
+}
+
+func fromRSAPublicKey(cryptoPublicKey *rsa.PublicKey) *rsaPublicKey {
+	return &rsaPublicKey{cryptoPublicKey, map[string]interface{}{}}
+}
+
+// KeyType returns the JWK key type for RSA keys, i.e., "RSA".
+func (k *rsaPublicKey) KeyType() string {
+	return "RSA"
+}
+
+// KeyID returns a distinct identifier which is unique to this Public Key.
+func (k *rsaPublicKey) KeyID() string {
+	return keyIDFromCryptoKey(k)
+}
+
+func (k *rsaPublicKey) String() string {
+	return fmt.Sprintf("RSA Public Key <%s>", k.KeyID())
+}
+
+// Verify verifies the signature of the data in the io.Reader using this Public Key.
+// The alg parameter should be the name of the JWA digital signature algorithm
+// which was used to produce the signature and should be supported by this
+// public key. Returns a nil error if the signature is valid.
+func (k *rsaPublicKey) Verify(data io.Reader, alg string, signature []byte) error {
+	// Verify the signature of the given data; a non-nil error means it is invalid.
+	sigAlg, err := rsaSignatureAlgorithmByName(alg)
+	if err != nil {
+		return fmt.Errorf("unable to verify Signature: %s", err)
+	}
+
+	hasher := sigAlg.HashID().New()
+	_, err = io.Copy(hasher, data)
+	if err != nil {
+		return fmt.Errorf("error reading data to sign: %s", err)
+	}
+	hash := hasher.Sum(nil)
+
+	err = rsa.VerifyPKCS1v15(k.PublicKey, sigAlg.HashID(), hash, signature)
+	if err != nil {
+		return fmt.Errorf("invalid %s signature: %s", sigAlg.HeaderParam(), err)
+	}
+
+	return nil
+}
+
+// CryptoPublicKey returns the internal object which can be used as a
+// crypto.PublicKey for use with other standard library operations. The type
+// is either *rsa.PublicKey or *ecdsa.PublicKey
+func (k *rsaPublicKey) CryptoPublicKey() crypto.PublicKey {
+	return k.PublicKey
+}
+
+func (k *rsaPublicKey) toMap() map[string]interface{} {
+	jwk := make(map[string]interface{})
+	for k, v := range k.extended {
+		jwk[k] = v
+	}
+	jwk["kty"] = k.KeyType()
+	jwk["kid"] = k.KeyID()
+	jwk["n"] = joseBase64UrlEncode(k.N.Bytes())
+	jwk["e"] = joseBase64UrlEncode(serializeRSAPublicExponentParam(k.E))
+
+	return jwk
+}
+
+// MarshalJSON serializes this Public Key using the JWK JSON serialization format for
+// RSA keys.
+func (k *rsaPublicKey) MarshalJSON() (data []byte, err error) {
+	return json.Marshal(k.toMap())
+}
+
+// PEMBlock serializes this Public Key to DER-encoded PKIX format.
+func (k *rsaPublicKey) PEMBlock() (*pem.Block, error) {
+	derBytes, err := x509.MarshalPKIXPublicKey(k.PublicKey)
+	if err != nil {
+		return nil, fmt.Errorf("unable to serialize RSA PublicKey to DER-encoded PKIX format: %s", err)
+	}
+	k.extended["kid"] = k.KeyID() // For display purposes.
+	return createPemBlock("PUBLIC KEY", derBytes, k.extended)
+}
+
+func (k *rsaPublicKey) AddExtendedField(field string, value interface{}) {
+	k.extended[field] = value
+}
+
+func (k *rsaPublicKey) GetExtendedField(field string) interface{} {
+	v, ok := k.extended[field]
+	if !ok {
+		return nil
+	}
+	return v
+}
+
+func rsaPublicKeyFromMap(jwk map[string]interface{}) (*rsaPublicKey, error) {
+	// JWK key type (kty) has already been determined to be "RSA".
+	// Need to extract 'n', 'e', and 'kid' and check for
+	// consistency.
+
+	// Get the modulus parameter N.
+	nB64Url, err := stringFromMap(jwk, "n")
+	if err != nil {
+		return nil, fmt.Errorf("JWK RSA Public Key modulus: %s", err)
+	}
+
+	n, err := parseRSAModulusParam(nB64Url)
+	if err != nil {
+		return nil, fmt.Errorf("JWK RSA Public Key modulus: %s", err)
+	}
+
+	// Get the public exponent E.
+	eB64Url, err := stringFromMap(jwk, "e")
+	if err != nil {
+		return nil, fmt.Errorf("JWK RSA Public Key exponent: %s", err)
+	}
+
+	e, err := parseRSAPublicExponentParam(eB64Url)
+	if err != nil {
+		return nil, fmt.Errorf("JWK RSA Public Key exponent: %s", err)
+	}
+
+	key := &rsaPublicKey{
+		PublicKey: &rsa.PublicKey{N: n, E: e},
+	}
+
+	// Key ID is optional, but if it exists, it should match the key.
+	_, ok := jwk["kid"]
+	if ok {
+		kid, err := stringFromMap(jwk, "kid")
+		if err != nil {
+			return nil, fmt.Errorf("JWK RSA Public Key ID: %s", err)
+		}
+		if kid != key.KeyID() {
+			return nil, fmt.Errorf("JWK RSA Public Key ID does not match: %s", kid)
+		}
+	}
+
+	if _, ok := jwk["d"]; ok {
+		return nil, fmt.Errorf("JWK RSA Public Key cannot contain private exponent")
+	}
+
+	key.extended = jwk
+
+	return key, nil
+}
+
+/*
+ * RSA DSA PRIVATE KEY
+ */
+
+// rsaPrivateKey implements a JWK Private Key using RSA digital signature algorithms.
+type rsaPrivateKey struct {
+	rsaPublicKey
+	*rsa.PrivateKey
+}
+
+func fromRSAPrivateKey(cryptoPrivateKey *rsa.PrivateKey) *rsaPrivateKey {
+	return &rsaPrivateKey{
+		*fromRSAPublicKey(&cryptoPrivateKey.PublicKey),
+		cryptoPrivateKey,
+	}
+}
+
+// PublicKey returns the Public Key data associated with this Private Key.
+func (k *rsaPrivateKey) PublicKey() PublicKey {
+	return &k.rsaPublicKey
+}
+
+func (k *rsaPrivateKey) String() string {
+	return fmt.Sprintf("RSA Private Key <%s>", k.KeyID())
+}
+
+// Sign signs the data read from the io.Reader using a signature algorithm supported
+// by the RSA private key. If the specified hashing algorithm is supported by
+// this key, that hash function is used to generate the signature; otherwise
+// the default hashing algorithm for this key is used. Returns the signature
+// and the name of the JWK signature algorithm used, e.g., "RS256", "RS384",
+// "RS512".
+func (k *rsaPrivateKey) Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error) {
+	// Generate a signature of the data using the internal alg.
+	sigAlg := rsaPKCS1v15SignatureAlgorithmForHashID(hashID)
+	hasher := sigAlg.HashID().New()
+
+	_, err = io.Copy(hasher, data)
+	if err != nil {
+		return nil, "", fmt.Errorf("error reading data to sign: %s", err)
+	}
+	hash := hasher.Sum(nil)
+
+	signature, err = rsa.SignPKCS1v15(rand.Reader, k.PrivateKey, sigAlg.HashID(), hash)
+	if err != nil {
+		return nil, "", fmt.Errorf("error producing signature: %s", err)
+	}
+
+	alg = sigAlg.HeaderParam()
+
+	return
+}
+
+// CryptoPrivateKey returns the internal object which can be used as a
+// crypto.PrivateKey for use with other standard library operations. The type
+// is either *rsa.PrivateKey or *ecdsa.PrivateKey.
+func (k *rsaPrivateKey) CryptoPrivateKey() crypto.PrivateKey {
+	return k.PrivateKey
+}
+
+func (k *rsaPrivateKey) toMap() map[string]interface{} {
+	k.Precompute() // Make sure the precomputed values are stored.
+	jwk := k.rsaPublicKey.toMap()
+
+	jwk["d"] = joseBase64UrlEncode(k.D.Bytes())
+	jwk["p"] = joseBase64UrlEncode(k.Primes[0].Bytes())
+	jwk["q"] = joseBase64UrlEncode(k.Primes[1].Bytes())
+	jwk["dp"] = joseBase64UrlEncode(k.Precomputed.Dp.Bytes())
+	jwk["dq"] = joseBase64UrlEncode(k.Precomputed.Dq.Bytes())
+	jwk["qi"] = joseBase64UrlEncode(k.Precomputed.Qinv.Bytes())
+
+	otherPrimes := k.Primes[2:]
+
+	if len(otherPrimes) > 0 {
+		otherPrimesInfo := make([]interface{}, len(otherPrimes))
+		for i, r := range otherPrimes {
+			otherPrimeInfo := make(map[string]string, 3)
+			otherPrimeInfo["r"] = joseBase64UrlEncode(r.Bytes())
+			crtVal := k.Precomputed.CRTValues[i]
+			otherPrimeInfo["d"] = joseBase64UrlEncode(crtVal.Exp.Bytes())
+			otherPrimeInfo["t"] = joseBase64UrlEncode(crtVal.Coeff.Bytes())
+			otherPrimesInfo[i] = otherPrimeInfo
+		}
+		jwk["oth"] = otherPrimesInfo
+	}
+
+	return jwk
+}
+
+// MarshalJSON serializes this Private Key using the JWK JSON serialization format for
+// RSA keys.
+func (k *rsaPrivateKey) MarshalJSON() (data []byte, err error) {
+	return json.Marshal(k.toMap())
+}
+
+// PEMBlock serializes this Private Key to DER-encoded PKCS#1 format.
+func (k *rsaPrivateKey) PEMBlock() (*pem.Block, error) {
+	derBytes := x509.MarshalPKCS1PrivateKey(k.PrivateKey)
+	k.extended["keyID"] = k.KeyID() // For display purposes.
+	return createPemBlock("RSA PRIVATE KEY", derBytes, k.extended)
+}
+
+func rsaPrivateKeyFromMap(jwk map[string]interface{}) (*rsaPrivateKey, error) {
+	// The JWA spec for RSA Private Keys (draft rfc section 5.3.2) states that
+	// only the private key exponent 'd' is REQUIRED, the others are just for
+	// signature/decryption optimizations and SHOULD be included when the JWK
+	// is produced. We MAY choose to accept a JWK which only includes 'd', but
+	// we choose not to accept it without the extra fields. Only the 'oth'
+	// field will be optional (for multi-prime keys).
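+	//
+	// For reference, a JWK accepted here has roughly this shape (a sketch:
+	// the base64url payloads below are placeholders, not real key material):
+	//
+	//	{
+	//	  "kty": "RSA",
+	//	  "n": "<base64url>", "e": "<base64url>",
+	//	  "d": "<base64url>", "p": "<base64url>", "q": "<base64url>",
+	//	  "dp": "<base64url>", "dq": "<base64url>", "qi": "<base64url>"
+	//	}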
+	privateExponent, err := parseRSAPrivateKeyParamFromMap(jwk, "d")
+	if err != nil {
+		return nil, fmt.Errorf("JWK RSA Private Key exponent: %s", err)
+	}
+	firstPrimeFactor, err := parseRSAPrivateKeyParamFromMap(jwk, "p")
+	if err != nil {
+		return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err)
+	}
+	secondPrimeFactor, err := parseRSAPrivateKeyParamFromMap(jwk, "q")
+	if err != nil {
+		return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err)
+	}
+	firstFactorCRT, err := parseRSAPrivateKeyParamFromMap(jwk, "dp")
+	if err != nil {
+		return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err)
+	}
+	secondFactorCRT, err := parseRSAPrivateKeyParamFromMap(jwk, "dq")
+	if err != nil {
+		return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err)
+	}
+	crtCoeff, err := parseRSAPrivateKeyParamFromMap(jwk, "qi")
+	if err != nil {
+		return nil, fmt.Errorf("JWK RSA Private Key CRT coefficient: %s", err)
+	}
+
+	var oth interface{}
+	if _, ok := jwk["oth"]; ok {
+		oth = jwk["oth"]
+		delete(jwk, "oth")
+	}
+
+	// JWK key type (kty) has already been determined to be "RSA".
+	// Need to extract the public key information, then extract the private
+	// key values.
+	publicKey, err := rsaPublicKeyFromMap(jwk)
+	if err != nil {
+		return nil, err
+	}
+
+	privateKey := &rsa.PrivateKey{
+		PublicKey: *publicKey.PublicKey,
+		D:         privateExponent,
+		Primes:    []*big.Int{firstPrimeFactor, secondPrimeFactor},
+		Precomputed: rsa.PrecomputedValues{
+			Dp:   firstFactorCRT,
+			Dq:   secondFactorCRT,
+			Qinv: crtCoeff,
+		},
+	}
+
+	if oth != nil {
+		// Should be an array of more JSON objects.
+		otherPrimesInfo, ok := oth.([]interface{})
+		if !ok {
+			return nil, errors.New("JWK RSA Private Key: Invalid other primes info: must be an array")
+		}
+		numOtherPrimeFactors := len(otherPrimesInfo)
+		if numOtherPrimeFactors == 0 {
+			return nil, errors.New("JWK RSA Private Key: Invalid other primes info: must be absent or non-empty")
+		}
+		otherPrimeFactors := make([]*big.Int, numOtherPrimeFactors)
+		productOfPrimes := new(big.Int).Mul(firstPrimeFactor, secondPrimeFactor)
+		crtValues := make([]rsa.CRTValue, numOtherPrimeFactors)
+
+		for i, val := range otherPrimesInfo {
+			otherPrimeinfo, ok := val.(map[string]interface{})
+			if !ok {
+				return nil, errors.New("JWK RSA Private Key: Invalid other prime info: must be a JSON object")
+			}
+
+			otherPrimeFactor, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "r")
+			if err != nil {
+				return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err)
+			}
+			otherFactorCRT, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "d")
+			if err != nil {
+				return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err)
+			}
+			otherCrtCoeff, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "t")
+			if err != nil {
+				return nil, fmt.Errorf("JWK RSA Private Key CRT coefficient: %s", err)
+			}
+
+			// Take a pointer into crtValues so the computed values are
+			// stored in the slice rather than in a discarded copy.
+			crtValue := &crtValues[i]
+			crtValue.Exp = otherFactorCRT
+			crtValue.Coeff = otherCrtCoeff
+			crtValue.R = productOfPrimes
+			otherPrimeFactors[i] = otherPrimeFactor
+			productOfPrimes = new(big.Int).Mul(productOfPrimes, otherPrimeFactor)
+		}
+
+		privateKey.Primes = append(privateKey.Primes, otherPrimeFactors...)
+		privateKey.Precomputed.CRTValues = crtValues
+	}
+
+	key := &rsaPrivateKey{
+		rsaPublicKey: *publicKey,
+		PrivateKey:   privateKey,
+	}
+
+	return key, nil
+}
+
+/*
+ * Key Generation Functions.
+ */
+
+func generateRSAPrivateKey(bits int) (k *rsaPrivateKey, err error) {
+	k = new(rsaPrivateKey)
+	k.PrivateKey, err = rsa.GenerateKey(rand.Reader, bits)
+	if err != nil {
+		return nil, err
+	}
+
+	k.rsaPublicKey.PublicKey = &k.PrivateKey.PublicKey
+	k.extended = make(map[string]interface{})
+
+	return
+}
+
+// GenerateRSA2048PrivateKey generates a key pair using 2048-bit RSA.
+func GenerateRSA2048PrivateKey() (PrivateKey, error) {
+	k, err := generateRSAPrivateKey(2048)
+	if err != nil {
+		return nil, fmt.Errorf("error generating RSA 2048-bit key: %s", err)
+	}
+
+	return k, nil
+}
+
+// GenerateRSA3072PrivateKey generates a key pair using 3072-bit RSA.
+func GenerateRSA3072PrivateKey() (PrivateKey, error) {
+	k, err := generateRSAPrivateKey(3072)
+	if err != nil {
+		return nil, fmt.Errorf("error generating RSA 3072-bit key: %s", err)
+	}
+
+	return k, nil
+}
+
+// GenerateRSA4096PrivateKey generates a key pair using 4096-bit RSA.
+func GenerateRSA4096PrivateKey() (PrivateKey, error) {
+	k, err := generateRSAPrivateKey(4096)
+	if err != nil {
+		return nil, fmt.Errorf("error generating RSA 4096-bit key: %s", err)
+	}
+
+	return k, nil
+}
diff --git a/vendor/github.com/docker/libtrust/util.go b/vendor/github.com/docker/libtrust/util.go
new file mode 100644
index 00000000..d88176cc
--- /dev/null
+++ b/vendor/github.com/docker/libtrust/util.go
@@ -0,0 +1,363 @@
+package libtrust
+
+import (
+	"bytes"
+	"crypto"
+	"crypto/elliptic"
+	"crypto/tls"
+	"crypto/x509"
+	"encoding/base32"
+	"encoding/base64"
+	"encoding/binary"
+	"encoding/pem"
+	"errors"
+	"fmt"
+	"math/big"
+	"net/url"
+	"os"
+	"path/filepath"
+	"strings"
+	"time"
+)
+
+// LoadOrCreateTrustKey will load a PrivateKey from the specified path
+func LoadOrCreateTrustKey(trustKeyPath string) (PrivateKey, error) {
+	if err := os.MkdirAll(filepath.Dir(trustKeyPath), 0700); err != nil {
+		return nil, err
+	}
+
+	trustKey, err := LoadKeyFile(trustKeyPath)
+	if err == ErrKeyFileDoesNotExist {
+		trustKey, err = GenerateECP256PrivateKey()
+		if err != nil {
+			return nil, fmt.Errorf("error generating key: %s", err)
+		}
+
+		if err := SaveKey(trustKeyPath, trustKey); err != nil {
+			return nil, fmt.Errorf("error saving key file: %s", err)
+		}
+
+		dir, file := filepath.Split(trustKeyPath)
+		if err := SavePublicKey(filepath.Join(dir, "public-"+file), trustKey.PublicKey()); err != nil {
+			return nil, fmt.Errorf("error saving public key file: %s", err)
+		}
+	} else if err != nil {
+		return nil, fmt.Errorf("error loading key file: %s", err)
+	}
+	return trustKey, nil
+}
+
+// NewIdentityAuthTLSClientConfig returns a tls.Config configured to use
+// identity-based authentication from the specified dockerUrl, the
+// rootConfigPath and the server name to which it is connecting.
+// If trustUnknownHosts is true it will automatically add the host to the
+// known-hosts.json in rootConfigPath.
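+//
+// A minimal usage sketch (the URL, paths and server name are hypothetical):
+//
+//	tlsConfig, err := NewIdentityAuthTLSClientConfig(
+//		"tcp://machine.example.com:2376", true, "/root/.docker", "machine.example.com")
+//	if err != nil {
+//		// handle error
+//	}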
+func NewIdentityAuthTLSClientConfig(dockerUrl string, trustUnknownHosts bool, rootConfigPath string, serverName string) (*tls.Config, error) {
+	tlsConfig := newTLSConfig()
+
+	trustKeyPath := filepath.Join(rootConfigPath, "key.json")
+	knownHostsPath := filepath.Join(rootConfigPath, "known-hosts.json")
+
+	u, err := url.Parse(dockerUrl)
+	if err != nil {
+		return nil, fmt.Errorf("unable to parse machine url")
+	}
+
+	if u.Scheme == "unix" {
+		return nil, nil
+	}
+
+	addr := u.Host
+	proto := "tcp"
+
+	trustKey, err := LoadOrCreateTrustKey(trustKeyPath)
+	if err != nil {
+		return nil, fmt.Errorf("unable to load trust key: %s", err)
+	}
+
+	knownHosts, err := LoadKeySetFile(knownHostsPath)
+	if err != nil {
+		return nil, fmt.Errorf("could not load trusted hosts file: %s", err)
+	}
+
+	allowedHosts, err := FilterByHosts(knownHosts, addr, false)
+	if err != nil {
+		return nil, fmt.Errorf("error filtering hosts: %s", err)
+	}
+
+	certPool, err := GenerateCACertPool(trustKey, allowedHosts)
+	if err != nil {
+		return nil, fmt.Errorf("Could not create CA pool: %s", err)
+	}
+
+	tlsConfig.ServerName = serverName
+	tlsConfig.RootCAs = certPool
+
+	x509Cert, err := GenerateSelfSignedClientCert(trustKey)
+	if err != nil {
+		return nil, fmt.Errorf("certificate generation error: %s", err)
+	}
+
+	tlsConfig.Certificates = []tls.Certificate{{
+		Certificate: [][]byte{x509Cert.Raw},
+		PrivateKey:  trustKey.CryptoPrivateKey(),
+		Leaf:        x509Cert,
+	}}
+
+	tlsConfig.InsecureSkipVerify = true
+
+	testConn, err := tls.Dial(proto, addr, tlsConfig)
+	if err != nil {
+		return nil, fmt.Errorf("tls Handshake error: %s", err)
+	}
+
+	opts := x509.VerifyOptions{
+		Roots:         tlsConfig.RootCAs,
+		CurrentTime:   time.Now(),
+		DNSName:       tlsConfig.ServerName,
+		Intermediates: x509.NewCertPool(),
+	}
+
+	certs := testConn.ConnectionState().PeerCertificates
+	for i, cert := range certs {
+		if i == 0 {
+			continue
+		}
+		opts.Intermediates.AddCert(cert)
+	}
+
+	if _, err := certs[0].Verify(opts); err != nil {
+		if _, ok := err.(x509.UnknownAuthorityError); ok {
+			if trustUnknownHosts {
+				pubKey, err := FromCryptoPublicKey(certs[0].PublicKey)
+				if err != nil {
+					return nil, fmt.Errorf("error extracting public key from cert: %s", err)
+				}
+
+				pubKey.AddExtendedField("hosts", []string{addr})
+
+				if err := AddKeySetFile(knownHostsPath, pubKey); err != nil {
+					return nil, fmt.Errorf("error adding machine to known hosts: %s", err)
+				}
+			} else {
+				return nil, fmt.Errorf("unable to connect. unknown host: %s", addr)
+			}
+		}
+	}
+
+	testConn.Close()
+	tlsConfig.InsecureSkipVerify = false
+
+	return tlsConfig, nil
+}
+
+// joseBase64UrlEncode encodes the given data using the standard base64 url
+// encoding format but with all trailing '=' characters omitted in accordance
+// with the jose specification.
+// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2
+func joseBase64UrlEncode(b []byte) string {
+	return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=")
+}
+
+// joseBase64UrlDecode decodes the given string using the standard base64 url
+// decoder but first adds the appropriate number of trailing '=' characters in
+// accordance with the jose specification.
+// joseBase64UrlDecode decodes the given string using the standard base64 url
+// decoder but first adds the appropriate number of trailing '=' characters in
+// accordance with the jose specification.
+// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2
+func joseBase64UrlDecode(s string) ([]byte, error) {
+	s = strings.Replace(s, "\n", "", -1)
+	s = strings.Replace(s, " ", "", -1)
+	switch len(s) % 4 {
+	case 0:
+	case 2:
+		s += "=="
+	case 3:
+		s += "="
+	default:
+		return nil, errors.New("illegal base64url string")
+	}
+	return base64.URLEncoding.DecodeString(s)
+}
+
+func keyIDEncode(b []byte) string {
+	s := strings.TrimRight(base32.StdEncoding.EncodeToString(b), "=")
+	var buf bytes.Buffer
+	var i int
+	for i = 0; i < len(s)/4-1; i++ {
+		start := i * 4
+		end := start + 4
+		buf.WriteString(s[start:end] + ":")
+	}
+	buf.WriteString(s[i*4:])
+	return buf.String()
+}
+
+func keyIDFromCryptoKey(pubKey PublicKey) string {
+	// Generate and return a 'libtrust' fingerprint of the public key.
+	// For an RSA key this should be:
+	//   SHA256(DER encoded ASN1)
+	// Then truncated to 240 bits and encoded into 12 base32 groups like so:
+	//   ABCD:EFGH:IJKL:MNOP:QRST:UVWX:YZ23:4567:ABCD:EFGH:IJKL:MNOP
+	derBytes, err := x509.MarshalPKIXPublicKey(pubKey.CryptoPublicKey())
+	if err != nil {
+		return ""
+	}
+	hasher := crypto.SHA256.New()
+	hasher.Write(derBytes)
+	return keyIDEncode(hasher.Sum(nil)[:30])
+}
+
+func stringFromMap(m map[string]interface{}, key string) (string, error) {
+	val, ok := m[key]
+	if !ok {
+		return "", fmt.Errorf("%q value not specified", key)
+	}
+
+	str, ok := val.(string)
+	if !ok {
+		return "", fmt.Errorf("%q value must be a string", key)
+	}
+	delete(m, key)
+
+	return str, nil
+}
+
+func parseECCoordinate(cB64Url string, curve elliptic.Curve) (*big.Int, error) {
+	curveByteLen := (curve.Params().BitSize + 7) >> 3
+
+	cBytes, err := joseBase64UrlDecode(cB64Url)
+	if err != nil {
+		return nil, fmt.Errorf("invalid base64 URL encoding: %s", err)
+	}
+	cByteLength := len(cBytes)
+	if cByteLength != curveByteLen {
+		return nil, fmt.Errorf("invalid number of octets: got %d, should be %d", cByteLength, curveByteLen)
+	}
+	return new(big.Int).SetBytes(cBytes), nil
+}
+
+func parseECPrivateParam(dB64Url string, curve elliptic.Curve) (*big.Int, error) {
+	dBytes, err := joseBase64UrlDecode(dB64Url)
+	if err != nil {
+		return nil, fmt.Errorf("invalid base64 URL encoding: %s", err)
+	}
+
+	// The length of this octet string MUST be ceiling(log-base-2(n)/8)
+	// octets (where n is the order of the curve). This is because the private
+	// key d must be in the interval [1, n-1] so the bitlength of d should be
+	// no larger than the bitlength of n-1. The easiest way to find the octet
+	// length is to take bitlength(n-1), add 7 to force a carry, and shift this
+	// bit sequence right by 3, which is essentially dividing by 8 and adding
+	// 1 if there is any remainder. Thus, the private key value d should be
+	// output to (bitlength(n-1)+7)>>3 octets.
+	n := curve.Params().N
+	octetLength := (new(big.Int).Sub(n, big.NewInt(1)).BitLen() + 7) >> 3
+	dByteLength := len(dBytes)
+
+	if dByteLength != octetLength {
+		return nil, fmt.Errorf("invalid number of octets: got %d, should be %d", dByteLength, octetLength)
+	}
+
+	return new(big.Int).SetBytes(dBytes), nil
+}
+
+func parseRSAModulusParam(nB64Url string) (*big.Int, error) {
+	nBytes, err := joseBase64UrlDecode(nB64Url)
+	if err != nil {
+		return nil, fmt.Errorf("invalid base64 URL encoding: %s", err)
+	}
+
+	return new(big.Int).SetBytes(nBytes), nil
+}
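To make the octet-length arithmetic above concrete, a short worked check using standard-library curves (the function name is illustrative; `elliptic` and `fmt` are imported in this file):

```go
func exampleCurveOctetLengths() {
	// P-256: (256+7)>>3 = 32 octets. P-521: (521+7)>>3 = 66 octets; the +7
	// forces the carry that rounds 521/8 = 65.125 up to 66.
	for _, curve := range []elliptic.Curve{elliptic.P256(), elliptic.P521()} {
		byteLen := (curve.Params().BitSize + 7) >> 3
		fmt.Printf("%s: %d octets per coordinate\n", curve.Params().Name, byteLen)
	}
}
```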
+func serializeRSAPublicExponentParam(e int) []byte {
+	// We MUST use the minimum number of octets to represent E.
+	// E is supposed to be 65537 for performance and security reasons
+	// and is what golang's rsa package generates, but it might be
+	// different if imported from some other generator.
+	buf := make([]byte, 4)
+	binary.BigEndian.PutUint32(buf, uint32(e))
+	var i int
+	// Strip leading zero octets, but always keep at least one octet.
+	// (The original loop bound of 8 could read past the 4-byte buffer.)
+	for i = 0; i < len(buf)-1; i++ {
+		if buf[i] != 0 {
+			break
+		}
+	}
+	return buf[i:]
+}
+
+func parseRSAPublicExponentParam(eB64Url string) (int, error) {
+	eBytes, err := joseBase64UrlDecode(eB64Url)
+	if err != nil {
+		return 0, fmt.Errorf("invalid base64 URL encoding: %s", err)
+	}
+	// Only the minimum number of bytes were used to represent E, but
+	// binary.BigEndian.Uint32 expects at least 4 bytes, so we need
+	// to add zero padding if necessary.
+	byteLen := len(eBytes)
+	if byteLen > 4 {
+		return 0, fmt.Errorf("invalid public exponent: %d octets is too large", byteLen)
+	}
+	buf := make([]byte, 4-byteLen, 4)
+	eBytes = append(buf, eBytes...)
+
+	return int(binary.BigEndian.Uint32(eBytes)), nil
+}
+
+func parseRSAPrivateKeyParamFromMap(m map[string]interface{}, key string) (*big.Int, error) {
+	b64Url, err := stringFromMap(m, key)
+	if err != nil {
+		return nil, err
+	}
+
+	paramBytes, err := joseBase64UrlDecode(b64Url)
+	if err != nil {
+		return nil, fmt.Errorf("invalid base64 URL encoding: %s", err)
+	}
+
+	return new(big.Int).SetBytes(paramBytes), nil
+}
+
+func createPemBlock(name string, derBytes []byte, headers map[string]interface{}) (*pem.Block, error) {
+	pemBlock := &pem.Block{Type: name, Bytes: derBytes, Headers: map[string]string{}}
+	for k, v := range headers {
+		switch val := v.(type) {
+		case string:
+			pemBlock.Headers[k] = val
+		case []string:
+			if k == "hosts" {
+				pemBlock.Headers[k] = strings.Join(val, ",")
+			} else {
+				// Non-encodable type: fail instead of silently dropping the header.
+				return nil, fmt.Errorf("unable to encode PEM header %q: unsupported string list", k)
+			}
+		default:
+			// Non-encodable type: fail instead of silently dropping the header.
+			return nil, fmt.Errorf("unable to encode PEM header %q: unsupported type %T", k, val)
+		}
+	}
+
+	return pemBlock, nil
+}
+
+func pubKeyFromPEMBlock(pemBlock *pem.Block) (PublicKey, error) {
+	cryptoPublicKey, err := x509.ParsePKIXPublicKey(pemBlock.Bytes)
+	if err != nil {
+		return nil, fmt.Errorf("unable to decode Public Key PEM data: %s", err)
+	}
+
+	pubKey, err := FromCryptoPublicKey(cryptoPublicKey)
+	if err != nil {
+		return nil, err
+	}
+
+	addPEMHeadersToKey(pemBlock, pubKey)
+
+	return pubKey, nil
+}
+
+func addPEMHeadersToKey(pemBlock *pem.Block, pubKey PublicKey) {
+	for key, value := range pemBlock.Headers {
+		var safeVal interface{}
+		if key == "hosts" {
+			safeVal = strings.Split(value, ",")
+		} else {
+			safeVal = value
+		}
+		pubKey.AddExtendedField(key, safeVal)
+	}
+}
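A worked example of the minimal-octet exponent encoding handled by `serializeRSAPublicExponentParam` and `parseRSAPublicExponentParam` above, using the conventional E = 65537 (the function name is illustrative; it uses only helpers defined in this file):

```go
func exampleExponentRoundTrip() error {
	// 65537 = 0x00010001 as a big-endian uint32, so stripping the single
	// leading zero octet leaves the three octets 0x01 0x00 0x01.
	serialized := serializeRSAPublicExponentParam(65537)
	fmt.Printf("% x\n", serialized) // prints "01 00 01"

	e, err := parseRSAPublicExponentParam(joseBase64UrlEncode(serialized))
	if err != nil {
		return err
	}
	fmt.Println(e) // prints 65537
	return nil
}
```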
diff --git a/vendor/github.com/godbus/dbus/CONTRIBUTING.md b/vendor/github.com/godbus/dbus/CONTRIBUTING.md
new file mode 100644
index 00000000..c88f9b2b
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/CONTRIBUTING.md
@@ -0,0 +1,50 @@
+# How to Contribute
+
+## Getting Started
+
+- Fork the repository on GitHub
+- Read the [README](README.markdown) for build and test instructions
+- Play with the project, submit bugs, submit patches!
+
+## Contribution Flow
+
+This is a rough outline of what a contributor's workflow looks like:
+
+- Create a topic branch from where you want to base your work (usually master).
+- Make commits of logical units.
+- Make sure your commit messages are in the proper format (see below).
+- Push your changes to a topic branch in your fork of the repository.
+- Make sure the tests pass, and add any new tests as appropriate.
+- Submit a pull request to the original repository.
+
+Thanks for your contributions!
+
+### Format of the Commit Message
+
+We follow a rough convention for commit messages that is designed to answer two
+questions: what changed and why. The subject line should feature the what and
+the body of the commit should describe the why.
+
+```
+scripts: add the test-cluster command
+
+this uses tmux to setup a test cluster that you can easily kill and
+start for debugging.
+
+Fixes #38
+```
+
+The format can be described more formally as follows:
+
+```
+<subsystem>: <what changed>
+<BLANK LINE>
+<why this change was made>
+<BLANK LINE>
+<footer>