From afcb0f8abea4163daf049e5541f9f3ba70462bf4 Mon Sep 17 00:00:00 2001
From: Avi Deitcher
Date: Sun, 12 Apr 2020 15:26:32 +0300
Subject: [PATCH] use manifest-tool as library instead of cmd exec

Signed-off-by: Avi Deitcher
---
 src/cmd/linuxkit/pkglib/docker.go                  |  132 +-
 src/cmd/linuxkit/vendor.conf                       |   38 +-
 .../github.com/estesp/manifest-tool/LICENSE        |  191 +
 .../github.com/estesp/manifest-tool/README.md      |  311 +
 .../estesp/manifest-tool/docker/createml.go        |  527 ++
 .../estesp/manifest-tool/docker/inspect.go         |  440 ++
 .../estesp/manifest-tool/docker/inspect_v1.go      |  173 +
 .../estesp/manifest-tool/docker/inspect_v2.go      |  533 ++
 .../estesp/manifest-tool/docker/util.go            |   54 +
 .../estesp/manifest-tool/types/types.go            |   51 +
 .../estesp/manifest-tool/vendor.conf               |   49 +
 [... per-file diffstat for the remaining vendored files under src/cmd/linuxkit/vendor/: github.com/Nvveen/Gotty (removed), github.com/containerd/containerd, github.com/containerd/continuity, github.com/coreos/go-systemd (incl. v22), github.com/docker/cli, github.com/docker/distribution, github.com/docker/docker, github.com/docker/libtrust, github.com/godbus/dbus/v5, github.com/gogo/protobuf, github.com/golang/protobuf, github.com/hashicorp/go-version, github.com/mattn/go-shellwords, github.com/morikuni/aec, github.com/opencontainers/runc, github.com/vbatts/tar-split, golang.org/x/sys, golang.org/x/time, google.golang.org/genproto, and google.golang.org/grpc ...]
 971 files changed, 153695 insertions(+), 66721 deletions(-)
 delete mode 100644 src/cmd/linuxkit/vendor/github.com/Nvveen/Gotty/LICENSE
 delete mode 100644 src/cmd/linuxkit/vendor/github.com/Nvveen/Gotty/README
 delete mode 100644 src/cmd/linuxkit/vendor/github.com/Nvveen/Gotty/attributes.go
 delete mode 100644 src/cmd/linuxkit/vendor/github.com/Nvveen/Gotty/gotty.go
 delete mode 100644 src/cmd/linuxkit/vendor/github.com/Nvveen/Gotty/parser.go
 delete mode 100644 src/cmd/linuxkit/vendor/github.com/Nvveen/Gotty/types.go
 [... create/delete mode 100644 entries for the remaining vendored files ...]
src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/plugins/transport/transport.go create mode 100644 src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/pools/pools.go create mode 100644 src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/progress/progress.go create mode 100644 src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/progress/progressreader.go create mode 100644 src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/reexec/README.md create mode 100644 src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/reexec/command_linux.go create mode 100644 src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/reexec/command_unix.go create mode 100644 src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/reexec/command_unsupported.go create mode 100644 src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/reexec/command_windows.go create mode 100644 src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/reexec/reexec.go create mode 100644 src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/symlink/LICENSE.APACHE create mode 100644 src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/symlink/LICENSE.BSD create mode 100644 src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/symlink/README.md create mode 100644 src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/symlink/fs.go create mode 100644 src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/symlink/fs_unix.go create mode 100644 src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/symlink/fs_windows.go create mode 100644 src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/system/args_windows.go create mode 100644 src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/system/path_unix.go create mode 100644 src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/system/path_windows.go create mode 100644 src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/useragent/README.md create mode 100644 src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/useragent/useragent.go create mode 100644 src/cmd/linuxkit/vendor/github.com/docker/docker/plugin/v2/plugin.go create mode 100644 src/cmd/linuxkit/vendor/github.com/docker/docker/plugin/v2/plugin_linux.go create mode 100644 src/cmd/linuxkit/vendor/github.com/docker/docker/plugin/v2/plugin_unsupported.go create mode 100644 src/cmd/linuxkit/vendor/github.com/docker/docker/plugin/v2/settable.go create mode 100644 src/cmd/linuxkit/vendor/github.com/docker/docker/reference/errors.go create mode 100644 src/cmd/linuxkit/vendor/github.com/docker/docker/reference/store.go delete mode 100644 src/cmd/linuxkit/vendor/github.com/docker/docker/registry/service_v1.go create mode 100644 src/cmd/linuxkit/vendor/github.com/docker/libtrust/LICENSE create mode 100644 src/cmd/linuxkit/vendor/github.com/docker/libtrust/README.md create mode 100644 src/cmd/linuxkit/vendor/github.com/docker/libtrust/certificates.go create mode 100644 src/cmd/linuxkit/vendor/github.com/docker/libtrust/doc.go create mode 100644 src/cmd/linuxkit/vendor/github.com/docker/libtrust/ec_key.go create mode 100644 src/cmd/linuxkit/vendor/github.com/docker/libtrust/filter.go create mode 100644 src/cmd/linuxkit/vendor/github.com/docker/libtrust/hash.go create mode 100644 src/cmd/linuxkit/vendor/github.com/docker/libtrust/jsonsign.go create mode 100644 src/cmd/linuxkit/vendor/github.com/docker/libtrust/key.go create mode 100644 src/cmd/linuxkit/vendor/github.com/docker/libtrust/key_files.go create mode 100644 src/cmd/linuxkit/vendor/github.com/docker/libtrust/key_manager.go create mode 100644 src/cmd/linuxkit/vendor/github.com/docker/libtrust/rsa_key.go 
create mode 100644 src/cmd/linuxkit/vendor/github.com/docker/libtrust/util.go create mode 100644 src/cmd/linuxkit/vendor/github.com/estesp/manifest-tool/LICENSE create mode 100644 src/cmd/linuxkit/vendor/github.com/estesp/manifest-tool/README.md create mode 100644 src/cmd/linuxkit/vendor/github.com/estesp/manifest-tool/docker/createml.go create mode 100644 src/cmd/linuxkit/vendor/github.com/estesp/manifest-tool/docker/inspect.go create mode 100644 src/cmd/linuxkit/vendor/github.com/estesp/manifest-tool/docker/inspect_v1.go create mode 100644 src/cmd/linuxkit/vendor/github.com/estesp/manifest-tool/docker/inspect_v2.go create mode 100644 src/cmd/linuxkit/vendor/github.com/estesp/manifest-tool/docker/util.go create mode 100644 src/cmd/linuxkit/vendor/github.com/estesp/manifest-tool/types/types.go create mode 100755 src/cmd/linuxkit/vendor/github.com/estesp/manifest-tool/vendor.conf create mode 100644 src/cmd/linuxkit/vendor/github.com/godbus/dbus/v5/.travis.yml create mode 100644 src/cmd/linuxkit/vendor/github.com/godbus/dbus/v5/CONTRIBUTING.md create mode 100644 src/cmd/linuxkit/vendor/github.com/godbus/dbus/v5/LICENSE create mode 100644 src/cmd/linuxkit/vendor/github.com/godbus/dbus/v5/MAINTAINERS create mode 100644 src/cmd/linuxkit/vendor/github.com/godbus/dbus/v5/README.markdown create mode 100644 src/cmd/linuxkit/vendor/github.com/godbus/dbus/v5/_examples/bluetooth_introspect.go create mode 100644 src/cmd/linuxkit/vendor/github.com/godbus/dbus/v5/_examples/eavesdrop.go create mode 100644 src/cmd/linuxkit/vendor/github.com/godbus/dbus/v5/_examples/introspect.go create mode 100644 src/cmd/linuxkit/vendor/github.com/godbus/dbus/v5/_examples/list-names.go create mode 100644 src/cmd/linuxkit/vendor/github.com/godbus/dbus/v5/_examples/monitor.go create mode 100644 src/cmd/linuxkit/vendor/github.com/godbus/dbus/v5/_examples/notification.go create mode 100644 src/cmd/linuxkit/vendor/github.com/godbus/dbus/v5/_examples/prop.go create mode 100644 src/cmd/linuxkit/vendor/github.com/godbus/dbus/v5/_examples/server.go create mode 100644 src/cmd/linuxkit/vendor/github.com/godbus/dbus/v5/_examples/signal.go create mode 100644 src/cmd/linuxkit/vendor/github.com/godbus/dbus/v5/_examples/tcp_conn.go create mode 100644 src/cmd/linuxkit/vendor/github.com/godbus/dbus/v5/auth.go create mode 100644 src/cmd/linuxkit/vendor/github.com/godbus/dbus/v5/auth_anonymous.go create mode 100644 src/cmd/linuxkit/vendor/github.com/godbus/dbus/v5/auth_external.go create mode 100644 src/cmd/linuxkit/vendor/github.com/godbus/dbus/v5/auth_sha1.go create mode 100644 src/cmd/linuxkit/vendor/github.com/godbus/dbus/v5/call.go create mode 100644 src/cmd/linuxkit/vendor/github.com/godbus/dbus/v5/conn.go create mode 100644 src/cmd/linuxkit/vendor/github.com/godbus/dbus/v5/conn_darwin.go create mode 100644 src/cmd/linuxkit/vendor/github.com/godbus/dbus/v5/conn_other.go create mode 100644 src/cmd/linuxkit/vendor/github.com/godbus/dbus/v5/conn_test.go create mode 100644 src/cmd/linuxkit/vendor/github.com/godbus/dbus/v5/conn_unix.go create mode 100644 src/cmd/linuxkit/vendor/github.com/godbus/dbus/v5/conn_windows.go create mode 100644 src/cmd/linuxkit/vendor/github.com/godbus/dbus/v5/dbus.go create mode 100644 src/cmd/linuxkit/vendor/github.com/godbus/dbus/v5/dbus_test.go create mode 100644 src/cmd/linuxkit/vendor/github.com/godbus/dbus/v5/decoder.go create mode 100644 src/cmd/linuxkit/vendor/github.com/godbus/dbus/v5/decoder_test.go create mode 100644 src/cmd/linuxkit/vendor/github.com/godbus/dbus/v5/default_handler.go create mode 
100644 src/cmd/linuxkit/vendor/github.com/godbus/dbus/v5/doc.go create mode 100644 src/cmd/linuxkit/vendor/github.com/godbus/dbus/v5/encoder.go create mode 100644 src/cmd/linuxkit/vendor/github.com/godbus/dbus/v5/encoder_test.go create mode 100644 src/cmd/linuxkit/vendor/github.com/godbus/dbus/v5/examples_test.go create mode 100755 src/cmd/linuxkit/vendor/github.com/godbus/dbus/v5/exec_command_test.go create mode 100644 src/cmd/linuxkit/vendor/github.com/godbus/dbus/v5/export.go create mode 100644 src/cmd/linuxkit/vendor/github.com/godbus/dbus/v5/export_test.go create mode 100644 src/cmd/linuxkit/vendor/github.com/godbus/dbus/v5/go.mod create mode 100644 src/cmd/linuxkit/vendor/github.com/godbus/dbus/v5/go.sum create mode 100644 src/cmd/linuxkit/vendor/github.com/godbus/dbus/v5/homedir.go create mode 100644 src/cmd/linuxkit/vendor/github.com/godbus/dbus/v5/homedir_dynamic.go create mode 100644 src/cmd/linuxkit/vendor/github.com/godbus/dbus/v5/homedir_static.go create mode 100644 src/cmd/linuxkit/vendor/github.com/godbus/dbus/v5/introspect/call.go create mode 100644 src/cmd/linuxkit/vendor/github.com/godbus/dbus/v5/introspect/introspect.go create mode 100644 src/cmd/linuxkit/vendor/github.com/godbus/dbus/v5/introspect/introspectable.go create mode 100644 src/cmd/linuxkit/vendor/github.com/godbus/dbus/v5/match.go create mode 100644 src/cmd/linuxkit/vendor/github.com/godbus/dbus/v5/match_test.go create mode 100644 src/cmd/linuxkit/vendor/github.com/godbus/dbus/v5/message.go create mode 100644 src/cmd/linuxkit/vendor/github.com/godbus/dbus/v5/object.go create mode 100644 src/cmd/linuxkit/vendor/github.com/godbus/dbus/v5/object_test.go create mode 100644 src/cmd/linuxkit/vendor/github.com/godbus/dbus/v5/prop/prop.go create mode 100644 src/cmd/linuxkit/vendor/github.com/godbus/dbus/v5/proto_test.go create mode 100644 src/cmd/linuxkit/vendor/github.com/godbus/dbus/v5/server_interfaces.go create mode 100644 src/cmd/linuxkit/vendor/github.com/godbus/dbus/v5/server_interfaces_test.go create mode 100644 src/cmd/linuxkit/vendor/github.com/godbus/dbus/v5/sig.go create mode 100644 src/cmd/linuxkit/vendor/github.com/godbus/dbus/v5/sig_test.go create mode 100644 src/cmd/linuxkit/vendor/github.com/godbus/dbus/v5/store_test.go create mode 100644 src/cmd/linuxkit/vendor/github.com/godbus/dbus/v5/transport_darwin.go create mode 100644 src/cmd/linuxkit/vendor/github.com/godbus/dbus/v5/transport_generic.go create mode 100644 src/cmd/linuxkit/vendor/github.com/godbus/dbus/v5/transport_nonce_tcp.go create mode 100644 src/cmd/linuxkit/vendor/github.com/godbus/dbus/v5/transport_nonce_tcp_test.go create mode 100644 src/cmd/linuxkit/vendor/github.com/godbus/dbus/v5/transport_tcp.go create mode 100644 src/cmd/linuxkit/vendor/github.com/godbus/dbus/v5/transport_tcp_test.go create mode 100644 src/cmd/linuxkit/vendor/github.com/godbus/dbus/v5/transport_unix.go create mode 100644 src/cmd/linuxkit/vendor/github.com/godbus/dbus/v5/transport_unix_test.go create mode 100644 src/cmd/linuxkit/vendor/github.com/godbus/dbus/v5/transport_unixcred_dragonfly.go create mode 100644 src/cmd/linuxkit/vendor/github.com/godbus/dbus/v5/transport_unixcred_freebsd.go create mode 100644 src/cmd/linuxkit/vendor/github.com/godbus/dbus/v5/transport_unixcred_linux.go create mode 100644 src/cmd/linuxkit/vendor/github.com/godbus/dbus/v5/transport_unixcred_openbsd.go create mode 100644 src/cmd/linuxkit/vendor/github.com/godbus/dbus/v5/variant.go create mode 100644 src/cmd/linuxkit/vendor/github.com/godbus/dbus/v5/variant_lexer.go create mode 100644 
src/cmd/linuxkit/vendor/github.com/godbus/dbus/v5/variant_parser.go create mode 100644 src/cmd/linuxkit/vendor/github.com/godbus/dbus/v5/variant_test.go create mode 100644 src/cmd/linuxkit/vendor/github.com/gogo/protobuf/go.mod create mode 100644 src/cmd/linuxkit/vendor/github.com/gogo/protobuf/proto/custom_gogo.go delete mode 100644 src/cmd/linuxkit/vendor/github.com/gogo/protobuf/proto/decode_gogo.go create mode 100644 src/cmd/linuxkit/vendor/github.com/gogo/protobuf/proto/deprecated.go create mode 100644 src/cmd/linuxkit/vendor/github.com/gogo/protobuf/proto/table_marshal.go create mode 100644 src/cmd/linuxkit/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go create mode 100644 src/cmd/linuxkit/vendor/github.com/gogo/protobuf/proto/table_merge.go create mode 100644 src/cmd/linuxkit/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go create mode 100644 src/cmd/linuxkit/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go create mode 100644 src/cmd/linuxkit/vendor/github.com/gogo/protobuf/proto/wrappers.go create mode 100644 src/cmd/linuxkit/vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go create mode 100644 src/cmd/linuxkit/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/api.proto create mode 100644 src/cmd/linuxkit/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/source_context.proto create mode 100644 src/cmd/linuxkit/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/type.proto create mode 100644 src/cmd/linuxkit/vendor/github.com/golang/protobuf/go.mod create mode 100644 src/cmd/linuxkit/vendor/github.com/golang/protobuf/proto/deprecated.go create mode 100644 src/cmd/linuxkit/vendor/github.com/golang/protobuf/ptypes/any.go create mode 100644 src/cmd/linuxkit/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go create mode 100644 src/cmd/linuxkit/vendor/github.com/golang/protobuf/ptypes/any/any.proto create mode 100644 src/cmd/linuxkit/vendor/github.com/golang/protobuf/ptypes/doc.go create mode 100644 src/cmd/linuxkit/vendor/github.com/golang/protobuf/ptypes/duration.go create mode 100644 src/cmd/linuxkit/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go create mode 100644 src/cmd/linuxkit/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto create mode 100644 src/cmd/linuxkit/vendor/github.com/golang/protobuf/ptypes/timestamp.go create mode 100644 src/cmd/linuxkit/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go create mode 100644 src/cmd/linuxkit/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto create mode 100644 src/cmd/linuxkit/vendor/github.com/hashicorp/go-version/LICENSE create mode 100644 src/cmd/linuxkit/vendor/github.com/hashicorp/go-version/README.md create mode 100644 src/cmd/linuxkit/vendor/github.com/hashicorp/go-version/constraint.go create mode 100644 src/cmd/linuxkit/vendor/github.com/hashicorp/go-version/go.mod create mode 100644 src/cmd/linuxkit/vendor/github.com/hashicorp/go-version/version.go create mode 100644 src/cmd/linuxkit/vendor/github.com/hashicorp/go-version/version_collection.go create mode 100644 src/cmd/linuxkit/vendor/github.com/mattn/go-shellwords/LICENSE create mode 100644 src/cmd/linuxkit/vendor/github.com/mattn/go-shellwords/README.md create mode 100644 src/cmd/linuxkit/vendor/github.com/mattn/go-shellwords/go.mod create mode 100644 src/cmd/linuxkit/vendor/github.com/mattn/go-shellwords/shellwords.go create mode 100644 src/cmd/linuxkit/vendor/github.com/mattn/go-shellwords/util_posix.go create mode 100644 
src/cmd/linuxkit/vendor/github.com/mattn/go-shellwords/util_windows.go create mode 100644 src/cmd/linuxkit/vendor/github.com/morikuni/aec/LICENSE create mode 100644 src/cmd/linuxkit/vendor/github.com/morikuni/aec/README.md create mode 100644 src/cmd/linuxkit/vendor/github.com/morikuni/aec/aec.go create mode 100644 src/cmd/linuxkit/vendor/github.com/morikuni/aec/ansi.go create mode 100644 src/cmd/linuxkit/vendor/github.com/morikuni/aec/builder.go create mode 100644 src/cmd/linuxkit/vendor/github.com/morikuni/aec/sgr.go create mode 100644 src/cmd/linuxkit/vendor/github.com/opencontainers/runc/go.mod create mode 100644 src/cmd/linuxkit/vendor/github.com/opencontainers/runc/libcontainer/configs/blkio_device.go create mode 100644 src/cmd/linuxkit/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_linux.go create mode 100644 src/cmd/linuxkit/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_unsupported.go create mode 100644 src/cmd/linuxkit/vendor/github.com/opencontainers/runc/libcontainer/configs/config.go create mode 100644 src/cmd/linuxkit/vendor/github.com/opencontainers/runc/libcontainer/configs/config_linux.go create mode 100644 src/cmd/linuxkit/vendor/github.com/opencontainers/runc/libcontainer/configs/device.go create mode 100644 src/cmd/linuxkit/vendor/github.com/opencontainers/runc/libcontainer/configs/device_defaults.go create mode 100644 src/cmd/linuxkit/vendor/github.com/opencontainers/runc/libcontainer/configs/hugepage_limit.go create mode 100644 src/cmd/linuxkit/vendor/github.com/opencontainers/runc/libcontainer/configs/intelrdt.go create mode 100644 src/cmd/linuxkit/vendor/github.com/opencontainers/runc/libcontainer/configs/interface_priority_map.go create mode 100644 src/cmd/linuxkit/vendor/github.com/opencontainers/runc/libcontainer/configs/mount.go create mode 100644 src/cmd/linuxkit/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces.go create mode 100644 src/cmd/linuxkit/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_linux.go create mode 100644 src/cmd/linuxkit/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall.go create mode 100644 src/cmd/linuxkit/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall_unsupported.go create mode 100644 src/cmd/linuxkit/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_unsupported.go create mode 100644 src/cmd/linuxkit/vendor/github.com/opencontainers/runc/libcontainer/configs/network.go create mode 100644 src/cmd/linuxkit/vendor/github.com/opencontainers/runc/libcontainer/devices/devices.go create mode 100644 src/cmd/linuxkit/vendor/github.com/opencontainers/runc/libcontainer/nsenter/cloned_binary.c create mode 100644 src/cmd/linuxkit/vendor/github.com/opencontainers/runc/libcontainer/system/linux.go create mode 100644 src/cmd/linuxkit/vendor/github.com/opencontainers/runc/libcontainer/system/proc.go create mode 100644 src/cmd/linuxkit/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_32.go create mode 100644 src/cmd/linuxkit/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_64.go create mode 100644 src/cmd/linuxkit/vendor/github.com/opencontainers/runc/libcontainer/system/sysconfig.go create mode 100644 src/cmd/linuxkit/vendor/github.com/opencontainers/runc/libcontainer/system/sysconfig_notcgo.go create mode 100644 src/cmd/linuxkit/vendor/github.com/opencontainers/runc/libcontainer/system/unsupported.go create mode 100644 
src/cmd/linuxkit/vendor/github.com/opencontainers/runc/libcontainer/system/xattrs_linux.go delete mode 100644 src/cmd/linuxkit/vendor/github.com/opencontainers/runc/vendor.conf create mode 100644 src/cmd/linuxkit/vendor/github.com/vbatts/tar-split/LICENSE create mode 100644 src/cmd/linuxkit/vendor/github.com/vbatts/tar-split/README.md create mode 100644 src/cmd/linuxkit/vendor/github.com/vbatts/tar-split/archive/tar/common.go create mode 100644 src/cmd/linuxkit/vendor/github.com/vbatts/tar-split/archive/tar/format.go create mode 100644 src/cmd/linuxkit/vendor/github.com/vbatts/tar-split/archive/tar/reader.go create mode 100644 src/cmd/linuxkit/vendor/github.com/vbatts/tar-split/archive/tar/stat_actime1.go create mode 100644 src/cmd/linuxkit/vendor/github.com/vbatts/tar-split/archive/tar/stat_actime2.go create mode 100644 src/cmd/linuxkit/vendor/github.com/vbatts/tar-split/archive/tar/stat_unix.go create mode 100644 src/cmd/linuxkit/vendor/github.com/vbatts/tar-split/archive/tar/strconv.go create mode 100644 src/cmd/linuxkit/vendor/github.com/vbatts/tar-split/archive/tar/writer.go create mode 100644 src/cmd/linuxkit/vendor/github.com/vbatts/tar-split/tar/asm/README.md create mode 100644 src/cmd/linuxkit/vendor/github.com/vbatts/tar-split/tar/asm/assemble.go create mode 100644 src/cmd/linuxkit/vendor/github.com/vbatts/tar-split/tar/asm/disassemble.go create mode 100644 src/cmd/linuxkit/vendor/github.com/vbatts/tar-split/tar/asm/doc.go create mode 100644 src/cmd/linuxkit/vendor/github.com/vbatts/tar-split/tar/storage/doc.go create mode 100644 src/cmd/linuxkit/vendor/github.com/vbatts/tar-split/tar/storage/entry.go create mode 100644 src/cmd/linuxkit/vendor/github.com/vbatts/tar-split/tar/storage/getter.go create mode 100644 src/cmd/linuxkit/vendor/github.com/vbatts/tar-split/tar/storage/packer.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/cpu/byteorder.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/cpu/cpu.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/cpu/cpu_aix_ppc64.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/cpu/cpu_arm.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/cpu/cpu_arm64.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/cpu/cpu_arm64.s create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/cpu/cpu_linux.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/cpu/cpu_linux_arm.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go create mode 100644 
src/cmd/linuxkit/vendor/golang.org/x/sys/cpu/cpu_mips64x.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/cpu/cpu_mipsx.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/cpu/cpu_riscv64.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/cpu/cpu_s390x.s create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/cpu/cpu_wasm.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/cpu/cpu_x86.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/cpu/cpu_x86.s create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/cpu/hwcap_linux.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/go.mod create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/unix/aliases.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/unix/asm_aix_ppc64.s create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/unix/asm_freebsd_arm64.s create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/unix/asm_netbsd_arm64.s create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/unix/asm_openbsd_arm64.s create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/unix/dev_aix_ppc.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/unix/dev_aix_ppc64.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/unix/errors_freebsd_arm64.go rename src/cmd/linuxkit/vendor/golang.org/x/sys/unix/{flock.go => fcntl.go} (54%) create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/unix/fcntl_darwin.go rename src/cmd/linuxkit/vendor/golang.org/x/sys/unix/{flock_linux_32bit.go => fcntl_linux_32bit.go} (100%) create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/unix/fdset.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/unix/ioctl.go delete mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/unix/openbsd_pledge.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/unix/pledge_openbsd.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/unix/readdirent_getdents.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/unix/readdirent_getdirentries.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/unix/sockcmsg_dragonfly.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/unix/sockcmsg_unix_other.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/unix/syscall_aix.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/unix/syscall_darwin.1_12.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/unix/syscall_darwin.1_13.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/unix/syscall_darwin_386.1_11.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.1_11.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/unix/syscall_darwin_arm.1_11.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.1_11.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go create mode 100644 
src/cmd/linuxkit/vendor/golang.org/x/sys/unix/syscall_illumos.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/unix/syscall_linux_gc_386.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_386.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_arm.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/unix/syscall_netbsd_arm64.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/unix/syscall_unix_gc_ppc64x.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/unix/unveil_openbsd.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/unix/xattr_bsd.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zerrors_linux.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go rename src/cmd/linuxkit/vendor/golang.org/x/sys/unix/{zptracearm_linux.go => zptrace_armnn_linux.go} (93%) create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zptrace_linux_arm64.go rename src/cmd/linuxkit/vendor/golang.org/x/sys/unix/{zptracemips_linux.go => zptrace_mipsnn_linux.go} (93%) rename src/cmd/linuxkit/vendor/golang.org/x/sys/unix/{zptracemipsle_linux.go => zptrace_mipsnnle_linux.go} (93%) rename src/cmd/linuxkit/vendor/golang.org/x/sys/unix/{zptrace386_linux.go => zptrace_x86_linux.go} (95%) create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_11.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_13.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_13.s create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.s create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_11.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.s create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_11.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_13.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_13.s create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.s create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_11.go create 
mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.s create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsyscall_linux.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm64.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/unix/ztypes_linux.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/windows/aliases.go delete mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/windows/asm_windows_386.s delete mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/windows/asm_windows_amd64.s create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/windows/empty.s create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/windows/registry/key.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/windows/registry/mksyscall.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/windows/registry/syscall.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/windows/registry/value.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/windows/svc/sys_arm.s create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/windows/types_windows_arm.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/windows/zerrors_windows.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/sys/windows/zknownfolderids_windows.go create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/time/LICENSE create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/time/PATENTS create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/time/README.md create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/time/go.mod create mode 100644 src/cmd/linuxkit/vendor/golang.org/x/time/rate/rate.go create mode 100644 src/cmd/linuxkit/vendor/google.golang.org/genproto/LICENSE create mode 100644 src/cmd/linuxkit/vendor/google.golang.org/genproto/README.md create mode 100644 
src/cmd/linuxkit/vendor/google.golang.org/genproto/go.mod
 create mode 100644 src/cmd/linuxkit/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go
 create mode 100644 src/cmd/linuxkit/vendor/google.golang.org/grpc/LICENSE
 create mode 100644 src/cmd/linuxkit/vendor/google.golang.org/grpc/README.md
 create mode 100644 src/cmd/linuxkit/vendor/google.golang.org/grpc/codes/code_string.go
 create mode 100644 src/cmd/linuxkit/vendor/google.golang.org/grpc/codes/codes.go
 create mode 100644 src/cmd/linuxkit/vendor/google.golang.org/grpc/go.mod
 create mode 100644 src/cmd/linuxkit/vendor/google.golang.org/grpc/internal/status/status.go
 create mode 100644 src/cmd/linuxkit/vendor/google.golang.org/grpc/status/status.go

diff --git a/src/cmd/linuxkit/pkglib/docker.go b/src/cmd/linuxkit/pkglib/docker.go
index 7d9d981eb..057b9d660 100644
--- a/src/cmd/linuxkit/pkglib/docker.go
+++ b/src/cmd/linuxkit/pkglib/docker.go
@@ -5,16 +5,20 @@ package pkglib
 //go:generate ./gen
 
 import (
-	"bytes"
 	"encoding/base64"
 	"fmt"
 	"io"
 	"os"
 	"os/exec"
 	"path"
+	"strconv"
 	"strings"
 
 	"github.com/docker/cli/cli/config"
+	"github.com/docker/distribution/manifest/manifestlist"
+	dockertypes "github.com/docker/docker/api/types"
+	"github.com/estesp/manifest-tool/docker"
+	"github.com/estesp/manifest-tool/types"
 	log "github.com/sirupsen/logrus"
 	"golang.org/x/sync/errgroup"
 )
@@ -149,13 +153,23 @@ func (dr dockerRunner) pushWithManifest(img, suffix string) error {
 		return err
 	}
 
-	var trust bool
-	if dr.dct {
-		trust = true
+	auth, err := getDockerAuth()
+	if err != nil {
+		return fmt.Errorf("failed to get auth: %v", err)
 	}
 
 	fmt.Printf("Pushing %s to manifest %s\n", img+suffix, img)
-	return manifestPush(img, trust)
+	digest, l, err := manifestPush(img, auth)
+	if err != nil {
+		return err
+	}
+	// if trust is not enabled, nothing more to do
+	if !dr.dct {
+		fmt.Println("trust disabled, not signing")
+		return nil
+	}
+	fmt.Printf("Signing manifest for %s\n", img)
+	return signManifest(img, digest, l, auth)
 }
 
 func (dr dockerRunner) tag(ref, tag string) error {
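For context on the docker/cli config API used by the new getDockerAuth helper (added in the next hunk): it resolves registry credentials from the standard Docker client configuration, consulting credential helpers where configured. A minimal standalone sketch, with Docker Hub's conventional config key as a stand-in for the pkglib `registry` constant:

package main

import (
	"fmt"
	"os"

	"github.com/docker/cli/cli/config"
)

func main() {
	// Load ~/.docker/config.json (or $DOCKER_CONFIG/config.json),
	// writing any load warnings to the supplied writer.
	cfgFile := config.LoadDefaultConfigFile(os.Stderr)

	// Resolve credentials for a registry key; the Docker Hub key shown
	// here is a placeholder for the pkglib `registry` constant.
	auth, err := cfgFile.GetAuthConfig("https://index.docker.io/v1/")
	if err != nil {
		fmt.Fprintf(os.Stderr, "failed to get auth: %v\n", err)
		os.Exit(1)
	}
	fmt.Printf("resolved registry credentials for %q\n", auth.Username)
}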
@@ -178,7 +192,49 @@ func (dr dockerRunner) save(tgt string, refs ...string) error {
 	return dr.command(args...)
 }
 
-func manifestPush(img string, trust bool) error {
+func getDockerAuth() (dockertypes.AuthConfig, error) {
+	cfgFile := config.LoadDefaultConfigFile(os.Stderr)
+	return cfgFile.GetAuthConfig(registry)
+}
+
+func manifestPush(img string, auth dockertypes.AuthConfig) (hash string, length int, err error) {
+	srcImages := []types.ManifestEntry{}
+
+	for i, platform := range platforms {
+		osArchArr := strings.Split(platform, "/")
+		if len(osArchArr) != 2 && len(osArchArr) != 3 {
+			return hash, length, fmt.Errorf("platform argument %d is not of form 'os/arch': '%s'", i, platform)
+		}
+		variant := ""
+		os, arch := osArchArr[0], osArchArr[1]
+		if len(osArchArr) == 3 {
+			variant = osArchArr[2]
+		}
+		srcImages = append(srcImages, types.ManifestEntry{
+			Image: fmt.Sprintf("%s-%s", img, arch),
+			Platform: manifestlist.PlatformSpec{
+				OS:           os,
+				Architecture: arch,
+				Variant:      variant,
+			},
+		})
+	}
+
+	yamlInput := types.YAMLInput{
+		Image:     img,
+		Manifests: srcImages,
+	}
+
+	a := types.AuthInfo{
+		Username: auth.Username,
+		Password: auth.Password,
+	}
+
+	// push the manifest list with the auth as given, ignore missing, do not allow insecure
+	return docker.PutManifestList(&a, yamlInput, true, false)
+}
+
+func signManifest(img, digest string, length int, auth dockertypes.AuthConfig) error {
 	imgParts := strings.Split(img, ":")
 	if len(imgParts) < 2 {
 		return fmt.Errorf("image not composed of : '%s'", img)
@@ -186,64 +242,18 @@ func manifestPush(img string, trust bool) error {
 	repo := imgParts[0]
 	tag := imgParts[1]
 
-	cfgFile := config.LoadDefaultConfigFile(os.Stderr)
-	auth, err := cfgFile.GetAuthConfig(registry)
-	if err != nil {
-		return fmt.Errorf("unable to get auth for %s: %v", registry, err)
+	digestParts := strings.Split(digest, ":")
+	if len(digestParts) < 2 {
+		return fmt.Errorf("digest not composed of : '%s'", digest)
 	}
-
-	args := []string{
-		"push",
-		"from-args",
-		"--ignore-missing",
-		"--platforms",
-		strings.Join(platforms, ","),
-		"--template",
-		fmt.Sprintf("%s-ARCH", img),
-		"--target",
-		img,
+	algo, hash := digestParts[0], digestParts[1]
+	if algo != "sha256" {
+		return fmt.Errorf("notary works with sha256 hash, not the provided %s", algo)
 	}
-	manTool := "manifest-tool"
-	// we do this separately to avoid printing username and password to debug output
-	log.Debugf("Executing (will add username/password): %v", append([]string{manTool}, args...))
-	args = append([]string{
-		"--username",
-		auth.Username,
-		"--password",
-		auth.Password,
-	}, args...)
-	cmd := exec.Command(manTool, args...)
-
-	var stdout bytes.Buffer
-	cmd.Stdout = &stdout
-	cmd.Stderr = os.Stderr
-	cmd.Env = os.Environ()
-
-	if err := cmd.Run(); err != nil {
-		return fmt.Errorf("failed to execute manifest-tool: %v", err)
-	}
-
-	if !trust {
-		fmt.Printf("trust disabled, not signing %s\n", img)
-		return nil
-	}
-
-	// get the image hash and the length from the manifest tool output
-	manToolOut := string(stdout.Bytes())
-	manToolOutParts := strings.Fields(manToolOut)
-	if len(manToolOutParts) < 3 {
-		return fmt.Errorf("manifest-tool output was less then required 3 parts '%s'", manToolOut)
-	}
-	hashParts := strings.Split(manToolOutParts[1], ":")
-	if len(hashParts) < 2 {
-		return fmt.Errorf("manifest-tool output hash was not in format : '%s'", manToolOutParts[1])
-	}
-	hash := hashParts[1]
-	length := manToolOutParts[2]
 
 	notaryAuth := base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%s:%s", auth.Username, auth.Password)))
 	// run the notary command to sign
-	args = []string{
+	args := []string{
 		"-s",
 		notaryServer,
 		"-d",
@@ -252,13 +262,13 @@ func manifestPush(img string, trust bool) error {
 		"-p",
 		fmt.Sprintf("docker.io/%s", repo),
 		tag,
-		length,
+		strconv.Itoa(length),
 		"--sha256",
 		hash,
 		"-r",
 		"targets/releases",
 	}
-	cmd = exec.Command("notary", args...)
+	cmd := exec.Command("notary", args...)
 	cmd.Stdout = os.Stdout
 	cmd.Stderr = os.Stderr
 	cmd.Env = append(os.Environ(), fmt.Sprintf("%s=%s", notaryDelegationPassphraseEnvVar, os.Getenv(dctEnvVar)), fmt.Sprintf("%s=%s", notaryAuthEnvVar, notaryAuth))
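That is the heart of the change: instead of exec-ing the manifest-tool binary and scraping its stdout for the digest, manifestPush now drives manifest-tool as a library. docker.PutManifestList pushes the manifest list and returns its digest and length directly, which signManifest then hands to notary. A minimal sketch of the library call; image names and credentials are placeholders, and the two booleans are ignore-missing and allow-insecure, per the comment in manifestPush:

package main

import (
	"fmt"
	"log"

	"github.com/docker/distribution/manifest/manifestlist"
	"github.com/estesp/manifest-tool/docker"
	"github.com/estesp/manifest-tool/types"
)

func main() {
	// Placeholder credentials; pkglib takes these from the docker CLI config.
	auth := types.AuthInfo{Username: "user", Password: "secret"}

	// One entry per architecture-specific image, as manifestPush builds
	// from the pkglib `platforms` list.
	input := types.YAMLInput{
		Image: "example/foo:latest",
		Manifests: []types.ManifestEntry{
			{
				Image:    "example/foo:latest-amd64",
				Platform: manifestlist.PlatformSpec{OS: "linux", Architecture: "amd64"},
			},
			{
				Image:    "example/foo:latest-arm64",
				Platform: manifestlist.PlatformSpec{OS: "linux", Architecture: "arm64"},
			},
		},
	}

	// Push the manifest list: ignore missing per-arch images, do not allow
	// insecure registries. The returned digest ("sha256:<hex>") and byte
	// length are exactly what signManifest feeds to notary.
	digest, length, err := docker.PutManifestList(&auth, input, true, false)
	if err != nil {
		log.Fatalf("manifest list push failed: %v", err)
	}
	fmt.Printf("pushed %s (%d bytes)\n", digest, length)
}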
diff --git a/src/cmd/linuxkit/vendor.conf b/src/cmd/linuxkit/vendor.conf
index 924d20fe6..e1dea1533 100644
--- a/src/cmd/linuxkit/vendor.conf
+++ b/src/cmd/linuxkit/vendor.conf
@@ -9,40 +9,53 @@ github.com/agl/ed25519 5312a61534124124185d41f09206b9fef1d88403
 github.com/aws/aws-sdk-go fa107560b5f3528a859a1a1511086646731bb1a8
 github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9
 github.com/containerd/console cb7008ab3d8359b78c5f464cb7cf160107ad5925
-github.com/containerd/containerd v1.1.2
-github.com/containerd/continuity d8fb8589b0e8e85b8c8bbaa8840226d0dfeb7371
+github.com/containerd/containerd v1.3.3
+github.com/containerd/continuity 0f16d7a0959cac64d7a54ce015e50cf4839d1970
+github.com/containerd/fifo f15a3290365b9d2627d189e619ab4008e0069caf
+github.com/containerd/ttrpc 0be804eadb152bc3b3c20c5edc314c4633833398
+github.com/containerd/typeurl 102fdb1d150dc56f98a7c856d441925d24a5757c
+github.com/coreos/go-systemd/v22 v22.0.0
 github.com/creack/goselect 58854f77ee8d858ce751b0a9bcc5533fef7bfa9e
 github.com/davecgh/go-spew v1.1.0
+github.com/godbus/dbus/v5 v5.0.3
 github.com/dchest/bcrypt_pbkdf 83f37f9c154a678179d11e218bff73ebe5717f99
 github.com/dgrijalva/jwt-go 6c8dedd55f8a2e41f605de6d5d66e51ed1f299fc
-github.com/docker/cli v18.06.0-ce
-github.com/docker/distribution 83389a148052d74ac602f5f1d62f86ff2f3c4aa5
-github.com/docker/docker b711437bbd8596312c962d4189e9ad4d2108c2dc
+github.com/docker/cli v18.09.9
+github.com/docker/distribution 742aab907b54a367e1ac7033fb9fe73b0e7344f5
+github.com/docker/docker 71e07f91307a9cb51071c6510768139c1f436750
 github.com/docker/docker-credential-helpers 5241b46610f2491efdf9d1c85f1ddf5b02f6d962
 github.com/docker/go d30aec9fd63c35133f8f79c3412ad91a3b08be06
 github.com/docker/go-connections 7beb39f0b969b075d1325fecb092faf27fd357b6
+github.com/docker/go-events e31b211e4f1cd09aa76fe4ac244571fab96ae47f
 github.com/docker/go-metrics d466d4f6fd960e01820085bd7e1a24426ee7ef18
 github.com/docker/go-units 9e638d38cf6977a37a8ea0078f3ee75a7cdb2dd1
+github.com/docker/libtrust aabc10ec26b754e797f9028f4589c5b7bd90dc20
+github.com/estesp/manifest-tool fa20a3b9b43f7c1acedb8d97c249803cc923e009
 github.com/go-ini/ini afbc45e87f3ba324c532d12c71918ef52e0fb194
-github.com/gogo/protobuf v1.0.0
-github.com/golang/protobuf v1.1.0
+github.com/gogo/protobuf v1.3.1
+github.com/gogo/googleapis v1.3.2
+github.com/golang/protobuf v1.3.5
 github.com/google/uuid 7e072fc3a7be179aee6d3359e46015aa8c995314
 github.com/gophercloud/gophercloud b9ea9cb68cf5803ea1567c404b549a783c8264b2
 github.com/gophercloud/utils 34f5991525d116b3832e0d9409492274f1c06bda
 github.com/gorilla/context v1.1
 github.com/gorilla/mux v1.1
 github.com/gorilla/websocket 21ab95fa12b9bdd8fecf5fa3586aad941cc98785
+github.com/hashicorp/go-version v1.2.0
 github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75
 github.com/jmespath/go-jmespath bd40a432e4c76585ef6b72d3fd96fb9b6dc7b68d
 github.com/linuxkit/virtsock 8e79449dea0735c1c056d814934dd035734cc97c
 github.com/matttproud/golang_protobuf_extensions v1.0.0
+github.com/mattn/go-shellwords v1.0.10
 github.com/mitchellh/go-ps 4fdf99ab29366514c69ccccddab5dc58b8d84062
 github.com/moby/hyperkit d65b09c1c28a2bfb6a976c86ecd885d2ee4c71d3
 github.com/moby/vpnkit 2ffc1dd8a84ea7359dd09b1f4b51bb728d4f46a0
+github.com/moby/sys 6154f11e6840c0d6b0dbb23f4125a6134b3013c9
+github.com/morikuni/aec v1.0.0
 github.com/moul/gotty-client e5589f6df35953284b091b8394daa6be6c453469
 github.com/opencontainers/go-digest v1.0.0-rc1
 github.com/opencontainers/image-spec v1.0.1
-github.com/opencontainers/runc ad0f5255060d36872be04de22f8731f38ef2d7b1
+github.com/opencontainers/runc ccbb3364d49d2dc6d9f057134570b0f382f6ceb7
 github.com/opencontainers/runtime-spec v1.0.1
 github.com/packethost/packngo f1be085ecd6fca1b0a0e25eda71f208dcfcee5ab
 github.com/pkg/errors v0.8.0
@@ -60,7 +73,9 @@ github.com/spf13/cobra v0.0.3
 github.com/spf13/pflag v1.0.1
 github.com/stretchr/testify v1.1.4
 github.com/surma/gocpio fcb68777e7dc4ea43ffce871b552c0d073c17495
+github.com/syndtr/gocapability d98352740cb2c55f81556b63d4a1ec64c5a319c2
 github.com/theupdateframework/notary v0.6.0
+github.com/vbatts/tar-split v0.11.1
 github.com/vmware/govmomi 6f8ebd89d521d9f9af7a6c2219c4deee511020dd
 github.com/xeipuuv/gojsonpointer 6fe8760cad3569743d51ddbb243b26f8456742dc
 github.com/xeipuuv/gojsonreference e02fc20de94c78484cd5ffb007f8af96be030a45
@@ -69,6 +84,11 @@ golang.org/x/crypto 1a580b3eff7814fc9b40602fd35256c63b50f491
 golang.org/x/net 0ed95abb35c445290478a5348a7b38bb154135fd
 golang.org/x/oauth2 1611bb46e67abc64a71ecc5c3ae67f1cbbc2b921
 golang.org/x/sync fd80eb99c8f653c847d294a001bdf2a3a6f768f5
-golang.org/x/sys 37707fdb30a5b38865cfb95e5aab41707daec7fd
+golang.org/x/sys 59c9f1ba88faf592b225274f69c5ef1e4ebacf82
+golang.org/x/text v0.3.2
+golang.org/x/time 555d28b269f0569763d25dbe1a237ae74c6bcc82
 google.golang.org/api 373a4c220f5c90e5b7ff7101779c5be385d171be
+google.golang.org/genproto 0848e9f44c368a0543357679c19d4eab1177012f
+google.golang.org/grpc 27096e8260a4cbde58a8578f3a2fadd723210ba7
 gopkg.in/yaml.v2 4c78c975fe7c825c6d1466c42be594d1d6f3aba6
+vbom.ml/util efcd4e0f97874370259c7d93e12aad57911dea81
diff --git a/src/cmd/linuxkit/vendor/github.com/Nvveen/Gotty/LICENSE b/src/cmd/linuxkit/vendor/github.com/Nvveen/Gotty/LICENSE
deleted file mode 100644
index 0b71c9736..000000000
--- a/src/cmd/linuxkit/vendor/github.com/Nvveen/Gotty/LICENSE
+++ /dev/null
@@ -1,26 +0,0 @@
-Copyright (c) 2012, Neal van Veen (nealvanveen@gmail.com)
-All rights reserved.
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. -2. Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -The views and conclusions contained in the software and documentation are those -of the authors and should not be interpreted as representing official policies, -either expressed or implied, of the FreeBSD Project. diff --git a/src/cmd/linuxkit/vendor/github.com/Nvveen/Gotty/README b/src/cmd/linuxkit/vendor/github.com/Nvveen/Gotty/README deleted file mode 100644 index a6b0d9a8f..000000000 --- a/src/cmd/linuxkit/vendor/github.com/Nvveen/Gotty/README +++ /dev/null @@ -1,5 +0,0 @@ -Gotty is a library written in Go that determines and reads termcap database -files to produce an interface for interacting with the capabilities of a -terminal. -See the godoc documentation or the source code for more information about -function usage. diff --git a/src/cmd/linuxkit/vendor/github.com/Nvveen/Gotty/attributes.go b/src/cmd/linuxkit/vendor/github.com/Nvveen/Gotty/attributes.go deleted file mode 100644 index a4c005fae..000000000 --- a/src/cmd/linuxkit/vendor/github.com/Nvveen/Gotty/attributes.go +++ /dev/null @@ -1,514 +0,0 @@ -// Copyright 2012 Neal van Veen. All rights reserved. -// Usage of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -package gotty - -// Boolean capabilities -var BoolAttr = [...]string{ - "auto_left_margin", "bw", - "auto_right_margin", "am", - "no_esc_ctlc", "xsb", - "ceol_standout_glitch", "xhp", - "eat_newline_glitch", "xenl", - "erase_overstrike", "eo", - "generic_type", "gn", - "hard_copy", "hc", - "has_meta_key", "km", - "has_status_line", "hs", - "insert_null_glitch", "in", - "memory_above", "da", - "memory_below", "db", - "move_insert_mode", "mir", - "move_standout_mode", "msgr", - "over_strike", "os", - "status_line_esc_ok", "eslok", - "dest_tabs_magic_smso", "xt", - "tilde_glitch", "hz", - "transparent_underline", "ul", - "xon_xoff", "nxon", - "needs_xon_xoff", "nxon", - "prtr_silent", "mc5i", - "hard_cursor", "chts", - "non_rev_rmcup", "nrrmc", - "no_pad_char", "npc", - "non_dest_scroll_region", "ndscr", - "can_change", "ccc", - "back_color_erase", "bce", - "hue_lightness_saturation", "hls", - "col_addr_glitch", "xhpa", - "cr_cancels_micro_mode", "crxm", - "has_print_wheel", "daisy", - "row_addr_glitch", "xvpa", - "semi_auto_right_margin", "sam", - "cpi_changes_res", "cpix", - "lpi_changes_res", "lpix", - "backspaces_with_bs", "", - "crt_no_scrolling", "", - "no_correctly_working_cr", "", - "gnu_has_meta_key", "", - "linefeed_is_newline", "", - "has_hardware_tabs", "", - "return_does_clr_eol", "", -} - -// Numerical capabilities -var NumAttr = [...]string{ - "columns", "cols", - "init_tabs", "it", - "lines", "lines", - "lines_of_memory", "lm", - "magic_cookie_glitch", "xmc", - "padding_baud_rate", "pb", - "virtual_terminal", "vt", - "width_status_line", "wsl", - "num_labels", "nlab", - "label_height", "lh", - "label_width", "lw", - "max_attributes", "ma", - "maximum_windows", "wnum", - "max_colors", "colors", - "max_pairs", "pairs", - "no_color_video", "ncv", - "buffer_capacity", "bufsz", - "dot_vert_spacing", "spinv", - "dot_horz_spacing", "spinh", - "max_micro_address", "maddr", - "max_micro_jump", "mjump", - "micro_col_size", "mcs", - "micro_line_size", "mls", - "number_of_pins", "npins", - "output_res_char", "orc", - "output_res_line", "orl", - "output_res_horz_inch", "orhi", - "output_res_vert_inch", "orvi", - "print_rate", "cps", - "wide_char_size", "widcs", - "buttons", "btns", - "bit_image_entwining", "bitwin", - "bit_image_type", "bitype", - "magic_cookie_glitch_ul", "", - "carriage_return_delay", "", - "new_line_delay", "", - "backspace_delay", "", - "horizontal_tab_delay", "", - "number_of_function_keys", "", -} - -// String capabilities -var StrAttr = [...]string{ - "back_tab", "cbt", - "bell", "bel", - "carriage_return", "cr", - "change_scroll_region", "csr", - "clear_all_tabs", "tbc", - "clear_screen", "clear", - "clr_eol", "el", - "clr_eos", "ed", - "column_address", "hpa", - "command_character", "cmdch", - "cursor_address", "cup", - "cursor_down", "cud1", - "cursor_home", "home", - "cursor_invisible", "civis", - "cursor_left", "cub1", - "cursor_mem_address", "mrcup", - "cursor_normal", "cnorm", - "cursor_right", "cuf1", - "cursor_to_ll", "ll", - "cursor_up", "cuu1", - "cursor_visible", "cvvis", - "delete_character", "dch1", - "delete_line", "dl1", - "dis_status_line", "dsl", - "down_half_line", "hd", - "enter_alt_charset_mode", "smacs", - "enter_blink_mode", "blink", - "enter_bold_mode", "bold", - "enter_ca_mode", "smcup", - "enter_delete_mode", "smdc", - "enter_dim_mode", "dim", - "enter_insert_mode", "smir", - "enter_secure_mode", "invis", - "enter_protected_mode", "prot", - "enter_reverse_mode", "rev", - "enter_standout_mode", "smso", - "enter_underline_mode", "smul", - 
"erase_chars", "ech", - "exit_alt_charset_mode", "rmacs", - "exit_attribute_mode", "sgr0", - "exit_ca_mode", "rmcup", - "exit_delete_mode", "rmdc", - "exit_insert_mode", "rmir", - "exit_standout_mode", "rmso", - "exit_underline_mode", "rmul", - "flash_screen", "flash", - "form_feed", "ff", - "from_status_line", "fsl", - "init_1string", "is1", - "init_2string", "is2", - "init_3string", "is3", - "init_file", "if", - "insert_character", "ich1", - "insert_line", "il1", - "insert_padding", "ip", - "key_backspace", "kbs", - "key_catab", "ktbc", - "key_clear", "kclr", - "key_ctab", "kctab", - "key_dc", "kdch1", - "key_dl", "kdl1", - "key_down", "kcud1", - "key_eic", "krmir", - "key_eol", "kel", - "key_eos", "ked", - "key_f0", "kf0", - "key_f1", "kf1", - "key_f10", "kf10", - "key_f2", "kf2", - "key_f3", "kf3", - "key_f4", "kf4", - "key_f5", "kf5", - "key_f6", "kf6", - "key_f7", "kf7", - "key_f8", "kf8", - "key_f9", "kf9", - "key_home", "khome", - "key_ic", "kich1", - "key_il", "kil1", - "key_left", "kcub1", - "key_ll", "kll", - "key_npage", "knp", - "key_ppage", "kpp", - "key_right", "kcuf1", - "key_sf", "kind", - "key_sr", "kri", - "key_stab", "khts", - "key_up", "kcuu1", - "keypad_local", "rmkx", - "keypad_xmit", "smkx", - "lab_f0", "lf0", - "lab_f1", "lf1", - "lab_f10", "lf10", - "lab_f2", "lf2", - "lab_f3", "lf3", - "lab_f4", "lf4", - "lab_f5", "lf5", - "lab_f6", "lf6", - "lab_f7", "lf7", - "lab_f8", "lf8", - "lab_f9", "lf9", - "meta_off", "rmm", - "meta_on", "smm", - "newline", "_glitch", - "pad_char", "npc", - "parm_dch", "dch", - "parm_delete_line", "dl", - "parm_down_cursor", "cud", - "parm_ich", "ich", - "parm_index", "indn", - "parm_insert_line", "il", - "parm_left_cursor", "cub", - "parm_right_cursor", "cuf", - "parm_rindex", "rin", - "parm_up_cursor", "cuu", - "pkey_key", "pfkey", - "pkey_local", "pfloc", - "pkey_xmit", "pfx", - "print_screen", "mc0", - "prtr_off", "mc4", - "prtr_on", "mc5", - "repeat_char", "rep", - "reset_1string", "rs1", - "reset_2string", "rs2", - "reset_3string", "rs3", - "reset_file", "rf", - "restore_cursor", "rc", - "row_address", "mvpa", - "save_cursor", "row_address", - "scroll_forward", "ind", - "scroll_reverse", "ri", - "set_attributes", "sgr", - "set_tab", "hts", - "set_window", "wind", - "tab", "s_magic_smso", - "to_status_line", "tsl", - "underline_char", "uc", - "up_half_line", "hu", - "init_prog", "iprog", - "key_a1", "ka1", - "key_a3", "ka3", - "key_b2", "kb2", - "key_c1", "kc1", - "key_c3", "kc3", - "prtr_non", "mc5p", - "char_padding", "rmp", - "acs_chars", "acsc", - "plab_norm", "pln", - "key_btab", "kcbt", - "enter_xon_mode", "smxon", - "exit_xon_mode", "rmxon", - "enter_am_mode", "smam", - "exit_am_mode", "rmam", - "xon_character", "xonc", - "xoff_character", "xoffc", - "ena_acs", "enacs", - "label_on", "smln", - "label_off", "rmln", - "key_beg", "kbeg", - "key_cancel", "kcan", - "key_close", "kclo", - "key_command", "kcmd", - "key_copy", "kcpy", - "key_create", "kcrt", - "key_end", "kend", - "key_enter", "kent", - "key_exit", "kext", - "key_find", "kfnd", - "key_help", "khlp", - "key_mark", "kmrk", - "key_message", "kmsg", - "key_move", "kmov", - "key_next", "knxt", - "key_open", "kopn", - "key_options", "kopt", - "key_previous", "kprv", - "key_print", "kprt", - "key_redo", "krdo", - "key_reference", "kref", - "key_refresh", "krfr", - "key_replace", "krpl", - "key_restart", "krst", - "key_resume", "kres", - "key_save", "ksav", - "key_suspend", "kspd", - "key_undo", "kund", - "key_sbeg", "kBEG", - "key_scancel", "kCAN", - "key_scommand", "kCMD", 
- "key_scopy", "kCPY", - "key_screate", "kCRT", - "key_sdc", "kDC", - "key_sdl", "kDL", - "key_select", "kslt", - "key_send", "kEND", - "key_seol", "kEOL", - "key_sexit", "kEXT", - "key_sfind", "kFND", - "key_shelp", "kHLP", - "key_shome", "kHOM", - "key_sic", "kIC", - "key_sleft", "kLFT", - "key_smessage", "kMSG", - "key_smove", "kMOV", - "key_snext", "kNXT", - "key_soptions", "kOPT", - "key_sprevious", "kPRV", - "key_sprint", "kPRT", - "key_sredo", "kRDO", - "key_sreplace", "kRPL", - "key_sright", "kRIT", - "key_srsume", "kRES", - "key_ssave", "kSAV", - "key_ssuspend", "kSPD", - "key_sundo", "kUND", - "req_for_input", "rfi", - "key_f11", "kf11", - "key_f12", "kf12", - "key_f13", "kf13", - "key_f14", "kf14", - "key_f15", "kf15", - "key_f16", "kf16", - "key_f17", "kf17", - "key_f18", "kf18", - "key_f19", "kf19", - "key_f20", "kf20", - "key_f21", "kf21", - "key_f22", "kf22", - "key_f23", "kf23", - "key_f24", "kf24", - "key_f25", "kf25", - "key_f26", "kf26", - "key_f27", "kf27", - "key_f28", "kf28", - "key_f29", "kf29", - "key_f30", "kf30", - "key_f31", "kf31", - "key_f32", "kf32", - "key_f33", "kf33", - "key_f34", "kf34", - "key_f35", "kf35", - "key_f36", "kf36", - "key_f37", "kf37", - "key_f38", "kf38", - "key_f39", "kf39", - "key_f40", "kf40", - "key_f41", "kf41", - "key_f42", "kf42", - "key_f43", "kf43", - "key_f44", "kf44", - "key_f45", "kf45", - "key_f46", "kf46", - "key_f47", "kf47", - "key_f48", "kf48", - "key_f49", "kf49", - "key_f50", "kf50", - "key_f51", "kf51", - "key_f52", "kf52", - "key_f53", "kf53", - "key_f54", "kf54", - "key_f55", "kf55", - "key_f56", "kf56", - "key_f57", "kf57", - "key_f58", "kf58", - "key_f59", "kf59", - "key_f60", "kf60", - "key_f61", "kf61", - "key_f62", "kf62", - "key_f63", "kf63", - "clr_bol", "el1", - "clear_margins", "mgc", - "set_left_margin", "smgl", - "set_right_margin", "smgr", - "label_format", "fln", - "set_clock", "sclk", - "display_clock", "dclk", - "remove_clock", "rmclk", - "create_window", "cwin", - "goto_window", "wingo", - "hangup", "hup", - "dial_phone", "dial", - "quick_dial", "qdial", - "tone", "tone", - "pulse", "pulse", - "flash_hook", "hook", - "fixed_pause", "pause", - "wait_tone", "wait", - "user0", "u0", - "user1", "u1", - "user2", "u2", - "user3", "u3", - "user4", "u4", - "user5", "u5", - "user6", "u6", - "user7", "u7", - "user8", "u8", - "user9", "u9", - "orig_pair", "op", - "orig_colors", "oc", - "initialize_color", "initc", - "initialize_pair", "initp", - "set_color_pair", "scp", - "set_foreground", "setf", - "set_background", "setb", - "change_char_pitch", "cpi", - "change_line_pitch", "lpi", - "change_res_horz", "chr", - "change_res_vert", "cvr", - "define_char", "defc", - "enter_doublewide_mode", "swidm", - "enter_draft_quality", "sdrfq", - "enter_italics_mode", "sitm", - "enter_leftward_mode", "slm", - "enter_micro_mode", "smicm", - "enter_near_letter_quality", "snlq", - "enter_normal_quality", "snrmq", - "enter_shadow_mode", "sshm", - "enter_subscript_mode", "ssubm", - "enter_superscript_mode", "ssupm", - "enter_upward_mode", "sum", - "exit_doublewide_mode", "rwidm", - "exit_italics_mode", "ritm", - "exit_leftward_mode", "rlm", - "exit_micro_mode", "rmicm", - "exit_shadow_mode", "rshm", - "exit_subscript_mode", "rsubm", - "exit_superscript_mode", "rsupm", - "exit_upward_mode", "rum", - "micro_column_address", "mhpa", - "micro_down", "mcud1", - "micro_left", "mcub1", - "micro_right", "mcuf1", - "micro_row_address", "mvpa", - "micro_up", "mcuu1", - "order_of_pins", "porder", - "parm_down_micro", "mcud", - 
"parm_left_micro", "mcub", - "parm_right_micro", "mcuf", - "parm_up_micro", "mcuu", - "select_char_set", "scs", - "set_bottom_margin", "smgb", - "set_bottom_margin_parm", "smgbp", - "set_left_margin_parm", "smglp", - "set_right_margin_parm", "smgrp", - "set_top_margin", "smgt", - "set_top_margin_parm", "smgtp", - "start_bit_image", "sbim", - "start_char_set_def", "scsd", - "stop_bit_image", "rbim", - "stop_char_set_def", "rcsd", - "subscript_characters", "subcs", - "superscript_characters", "supcs", - "these_cause_cr", "docr", - "zero_motion", "zerom", - "char_set_names", "csnm", - "key_mouse", "kmous", - "mouse_info", "minfo", - "req_mouse_pos", "reqmp", - "get_mouse", "getm", - "set_a_foreground", "setaf", - "set_a_background", "setab", - "pkey_plab", "pfxl", - "device_type", "devt", - "code_set_init", "csin", - "set0_des_seq", "s0ds", - "set1_des_seq", "s1ds", - "set2_des_seq", "s2ds", - "set3_des_seq", "s3ds", - "set_lr_margin", "smglr", - "set_tb_margin", "smgtb", - "bit_image_repeat", "birep", - "bit_image_newline", "binel", - "bit_image_carriage_return", "bicr", - "color_names", "colornm", - "define_bit_image_region", "defbi", - "end_bit_image_region", "endbi", - "set_color_band", "setcolor", - "set_page_length", "slines", - "display_pc_char", "dispc", - "enter_pc_charset_mode", "smpch", - "exit_pc_charset_mode", "rmpch", - "enter_scancode_mode", "smsc", - "exit_scancode_mode", "rmsc", - "pc_term_options", "pctrm", - "scancode_escape", "scesc", - "alt_scancode_esc", "scesa", - "enter_horizontal_hl_mode", "ehhlm", - "enter_left_hl_mode", "elhlm", - "enter_low_hl_mode", "elohlm", - "enter_right_hl_mode", "erhlm", - "enter_top_hl_mode", "ethlm", - "enter_vertical_hl_mode", "evhlm", - "set_a_attributes", "sgr1", - "set_pglen_inch", "slength", - "termcap_init2", "", - "termcap_reset", "", - "linefeed_if_not_lf", "", - "backspace_if_not_bs", "", - "other_non_function_keys", "", - "arrow_key_map", "", - "acs_ulcorner", "", - "acs_llcorner", "", - "acs_urcorner", "", - "acs_lrcorner", "", - "acs_ltee", "", - "acs_rtee", "", - "acs_btee", "", - "acs_ttee", "", - "acs_hline", "", - "acs_vline", "", - "acs_plus", "", - "memory_lock", "", - "memory_unlock", "", - "box_chars_1", "", -} diff --git a/src/cmd/linuxkit/vendor/github.com/Nvveen/Gotty/gotty.go b/src/cmd/linuxkit/vendor/github.com/Nvveen/Gotty/gotty.go deleted file mode 100644 index c329778a1..000000000 --- a/src/cmd/linuxkit/vendor/github.com/Nvveen/Gotty/gotty.go +++ /dev/null @@ -1,244 +0,0 @@ -// Copyright 2012 Neal van Veen. All rights reserved. -// Usage of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// Gotty is a Go-package for reading and parsing the terminfo database -package gotty - -// TODO add more concurrency to name lookup, look for more opportunities. - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "os" - "path" - "reflect" - "strings" - "sync" -) - -// Open a terminfo file by the name given and construct a TermInfo object. -// If something went wrong reading the terminfo database file, an error is -// returned. 
-func OpenTermInfo(termName string) (*TermInfo, error) { - if len(termName) == 0 { - return nil, errors.New("No termname given") - } - // Find the environment variables - if termloc := os.Getenv("TERMINFO"); len(termloc) > 0 { - return readTermInfo(path.Join(termloc, string(termName[0]), termName)) - } else { - // Search like ncurses - locations := []string{} - if h := os.Getenv("HOME"); len(h) > 0 { - locations = append(locations, path.Join(h, ".terminfo")) - } - locations = append(locations, - "/etc/terminfo/", - "/lib/terminfo/", - "/usr/share/terminfo/") - for _, str := range locations { - term, err := readTermInfo(path.Join(str, string(termName[0]), termName)) - if err == nil { - return term, nil - } - } - return nil, errors.New("No terminfo file(-location) found") - } -} - -// Open a terminfo file from the environment variable containing the current -// terminal name and construct a TermInfo object. If something went wrong -// reading the terminfo database file, an error is returned. -func OpenTermInfoEnv() (*TermInfo, error) { - termenv := os.Getenv("TERM") - return OpenTermInfo(termenv) -} - -// Return an attribute by the name attr provided. If none can be found, -// an error is returned. -func (term *TermInfo) GetAttribute(attr string) (stacker, error) { - // Channel to store the main value in. - var value stacker - // Add a blocking WaitGroup - var block sync.WaitGroup - // Keep track of variable being written. - written := false - // Function to put into goroutine. - f := func(ats interface{}) { - var ok bool - var v stacker - // Switch on type of map to use and assign value to it. - switch reflect.TypeOf(ats).Elem().Kind() { - case reflect.Bool: - v, ok = ats.(map[string]bool)[attr] - case reflect.Int16: - v, ok = ats.(map[string]int16)[attr] - case reflect.String: - v, ok = ats.(map[string]string)[attr] - } - // If ok, a value is found, so we can write. - if ok { - value = v - written = true - } - // Goroutine is done - block.Done() - } - block.Add(3) - // Go for all 3 attribute lists. - go f(term.boolAttributes) - go f(term.numAttributes) - go f(term.strAttributes) - // Wait until every goroutine is done. - block.Wait() - // If a value has been written, return it. - if written { - return value, nil - } - // Otherwise, error. - return nil, fmt.Errorf("Erorr finding attribute") -} - -// Return an attribute by the name attr provided. If none can be found, -// an error is returned. A name is first converted to its termcap value. -func (term *TermInfo) GetAttributeName(name string) (stacker, error) { - tc := GetTermcapName(name) - return term.GetAttribute(tc) -} - -// A utility function that finds and returns the termcap equivalent of a -// variable name. -func GetTermcapName(name string) string { - // Termcap name - var tc string - // Blocking group - var wait sync.WaitGroup - // Function to put into a goroutine - f := func(attrs []string) { - // Find the string corresponding to the name - for i, s := range attrs { - if s == name { - tc = attrs[i+1] - } - } - // Goroutine is finished - wait.Done() - } - wait.Add(3) - // Go for all 3 attribute lists - go f(BoolAttr[:]) - go f(NumAttr[:]) - go f(StrAttr[:]) - // Wait until every goroutine is done - wait.Wait() - // Return the termcap name - return tc -} - -// This function takes a path to a terminfo file and reads it in binary -// form to construct the actual TermInfo file. 
-func readTermInfo(path string) (*TermInfo, error) { - // Open the terminfo file - file, err := os.Open(path) - defer file.Close() - if err != nil { - return nil, err - } - - // magic, nameSize, boolSize, nrSNum, nrOffsetsStr, strSize - // Header is composed of the magic 0432 octal number, size of the name - // section, size of the boolean section, the amount of number values, - // the number of offsets of strings, and the size of the string section. - var header [6]int16 - // Byte array is used to read in byte values - var byteArray []byte - // Short array is used to read in short values - var shArray []int16 - // TermInfo object to store values - var term TermInfo - - // Read in the header - err = binary.Read(file, binary.LittleEndian, &header) - if err != nil { - return nil, err - } - // If magic number isn't there or isn't correct, we have the wrong filetype - if header[0] != 0432 { - return nil, errors.New(fmt.Sprintf("Wrong filetype")) - } - - // Read in the names - byteArray = make([]byte, header[1]) - err = binary.Read(file, binary.LittleEndian, &byteArray) - if err != nil { - return nil, err - } - term.Names = strings.Split(string(byteArray), "|") - - // Read in the booleans - byteArray = make([]byte, header[2]) - err = binary.Read(file, binary.LittleEndian, &byteArray) - if err != nil { - return nil, err - } - term.boolAttributes = make(map[string]bool) - for i, b := range byteArray { - if b == 1 { - term.boolAttributes[BoolAttr[i*2+1]] = true - } - } - // If the number of bytes read is not even, a byte for alignment is added - // We know the header is an even number of bytes so only need to check the - // total of the names and booleans. - if (header[1]+header[2])%2 != 0 { - err = binary.Read(file, binary.LittleEndian, make([]byte, 1)) - if err != nil { - return nil, err - } - } - - // Read in shorts - shArray = make([]int16, header[3]) - err = binary.Read(file, binary.LittleEndian, &shArray) - if err != nil { - return nil, err - } - term.numAttributes = make(map[string]int16) - for i, n := range shArray { - if n != 0377 && n > -1 { - term.numAttributes[NumAttr[i*2+1]] = n - } - } - - // Read the offsets into the short array - shArray = make([]int16, header[4]) - err = binary.Read(file, binary.LittleEndian, &shArray) - if err != nil { - return nil, err - } - // Read the actual strings in the byte array - byteArray = make([]byte, header[5]) - err = binary.Read(file, binary.LittleEndian, &byteArray) - if err != nil { - return nil, err - } - term.strAttributes = make(map[string]string) - // We get an offset, and then iterate until the string is null-terminated - for i, offset := range shArray { - if offset > -1 { - if int(offset) >= len(byteArray) { - return nil, errors.New("array out of bounds reading string section") - } - r := bytes.IndexByte(byteArray[offset:], 0) - if r == -1 { - return nil, errors.New("missing nul byte reading string section") - } - r += int(offset) - term.strAttributes[StrAttr[i*2+1]] = string(byteArray[offset:r]) - } - } - return &term, nil -} diff --git a/src/cmd/linuxkit/vendor/github.com/Nvveen/Gotty/parser.go b/src/cmd/linuxkit/vendor/github.com/Nvveen/Gotty/parser.go deleted file mode 100644 index a9d5d23c5..000000000 --- a/src/cmd/linuxkit/vendor/github.com/Nvveen/Gotty/parser.go +++ /dev/null @@ -1,362 +0,0 @@ -// Copyright 2012 Neal van Veen. All rights reserved. -// Usage of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -package gotty - -import ( - "bytes" - "errors" - "fmt" - "regexp" - "strconv" - "strings" -) - -var exp = [...]string{ - "%%", - "%c", - "%s", - "%p(\\d)", - "%P([A-z])", - "%g([A-z])", - "%'(.)'", - "%{([0-9]+)}", - "%l", - "%\\+|%-|%\\*|%/|%m", - "%&|%\\||%\\^", - "%=|%>|%<", - "%A|%O", - "%!|%~", - "%i", - "%(:[\\ #\\-\\+]{0,4})?(\\d+\\.\\d+|\\d+)?[doxXs]", - "%\\?(.*?);", -} - -var regex *regexp.Regexp -var staticVar map[byte]stacker - -// Parses the attribute that is received with name attr and parameters params. -func (term *TermInfo) Parse(attr string, params ...interface{}) (string, error) { - // Get the attribute name first. - iface, err := term.GetAttribute(attr) - str, ok := iface.(string) - if err != nil { - return "", err - } - if !ok { - return str, errors.New("Only string capabilities can be parsed.") - } - // Construct the hidden parser struct so we can use a recursive stack based - // parser. - ps := &parser{} - // Dynamic variables only exist in this context. - ps.dynamicVar = make(map[byte]stacker, 26) - ps.parameters = make([]stacker, len(params)) - // Convert the parameters to insert them into the parser struct. - for i, x := range params { - ps.parameters[i] = x - } - // Recursively walk and return. - result, err := ps.walk(str) - return result, err -} - -// Parses the attribute that is received with name attr and parameters params. -// Only works on full name of a capability that is given, which it uses to -// search for the termcap name. -func (term *TermInfo) ParseName(attr string, params ...interface{}) (string, error) { - tc := GetTermcapName(attr) - return term.Parse(tc, params) -} - -// Identify each token in a stack based manner and do the actual parsing. -func (ps *parser) walk(attr string) (string, error) { - // We use a buffer to get the modified string. - var buf bytes.Buffer - // Next, find and identify all tokens by their indices and strings. - tokens := regex.FindAllStringSubmatch(attr, -1) - if len(tokens) == 0 { - return attr, nil - } - indices := regex.FindAllStringIndex(attr, -1) - q := 0 // q counts the matches of one token - // Iterate through the string per character. - for i := 0; i < len(attr); i++ { - // If the current position is an identified token, execute the following - // steps. - if q < len(indices) && i >= indices[q][0] && i < indices[q][1] { - // Switch on token. - switch { - case tokens[q][0][:2] == "%%": - // Literal percentage character. - buf.WriteByte('%') - case tokens[q][0][:2] == "%c": - // Pop a character. - c, err := ps.st.pop() - if err != nil { - return buf.String(), err - } - buf.WriteByte(c.(byte)) - case tokens[q][0][:2] == "%s": - // Pop a string. - str, err := ps.st.pop() - if err != nil { - return buf.String(), err - } - if _, ok := str.(string); !ok { - return buf.String(), errors.New("Stack head is not a string") - } - buf.WriteString(str.(string)) - case tokens[q][0][:2] == "%p": - // Push a parameter on the stack. - index, err := strconv.ParseInt(tokens[q][1], 10, 8) - index-- - if err != nil { - return buf.String(), err - } - if int(index) >= len(ps.parameters) { - return buf.String(), errors.New("Parameters index out of bound") - } - ps.st.push(ps.parameters[index]) - case tokens[q][0][:2] == "%P": - // Pop a variable from the stack as a dynamic or static variable. 
- val, err := ps.st.pop() - if err != nil { - return buf.String(), err - } - index := tokens[q][2] - if len(index) > 1 { - errorStr := fmt.Sprintf("%s is not a valid dynamic variables index", - index) - return buf.String(), errors.New(errorStr) - } - // Specify either dynamic or static. - if index[0] >= 'a' && index[0] <= 'z' { - ps.dynamicVar[index[0]] = val - } else if index[0] >= 'A' && index[0] <= 'Z' { - staticVar[index[0]] = val - } - case tokens[q][0][:2] == "%g": - // Push a variable from the stack as a dynamic or static variable. - index := tokens[q][3] - if len(index) > 1 { - errorStr := fmt.Sprintf("%s is not a valid static variables index", - index) - return buf.String(), errors.New(errorStr) - } - var val stacker - if index[0] >= 'a' && index[0] <= 'z' { - val = ps.dynamicVar[index[0]] - } else if index[0] >= 'A' && index[0] <= 'Z' { - val = staticVar[index[0]] - } - ps.st.push(val) - case tokens[q][0][:2] == "%'": - // Push a character constant. - con := tokens[q][4] - if len(con) > 1 { - errorStr := fmt.Sprintf("%s is not a valid character constant", con) - return buf.String(), errors.New(errorStr) - } - ps.st.push(con[0]) - case tokens[q][0][:2] == "%{": - // Push an integer constant. - con, err := strconv.ParseInt(tokens[q][5], 10, 32) - if err != nil { - return buf.String(), err - } - ps.st.push(con) - case tokens[q][0][:2] == "%l": - // Push the length of the string that is popped from the stack. - popStr, err := ps.st.pop() - if err != nil { - return buf.String(), err - } - if _, ok := popStr.(string); !ok { - errStr := fmt.Sprintf("Stack head is not a string") - return buf.String(), errors.New(errStr) - } - ps.st.push(len(popStr.(string))) - case tokens[q][0][:2] == "%?": - // If-then-else construct. First, the whole string is identified and - // then inside this substring, we can specify which parts to switch on. - ifReg, _ := regexp.Compile("%\\?(.*)%t(.*)%e(.*);|%\\?(.*)%t(.*);") - ifTokens := ifReg.FindStringSubmatch(tokens[q][0]) - var ( - ifStr string - err error - ) - // Parse the if-part to determine if-else. - if len(ifTokens[1]) > 0 { - ifStr, err = ps.walk(ifTokens[1]) - } else { // else - ifStr, err = ps.walk(ifTokens[4]) - } - // Return any errors - if err != nil { - return buf.String(), err - } else if len(ifStr) > 0 { - // Self-defined limitation, not sure if this is correct, but didn't - // seem like it. - return buf.String(), errors.New("If-clause cannot print statements") - } - var thenStr string - // Pop the first value that is set by parsing the if-clause. - choose, err := ps.st.pop() - if err != nil { - return buf.String(), err - } - // Switch to if or else. - if choose.(int) == 0 && len(ifTokens[1]) > 0 { - thenStr, err = ps.walk(ifTokens[3]) - } else if choose.(int) != 0 { - if len(ifTokens[1]) > 0 { - thenStr, err = ps.walk(ifTokens[2]) - } else { - thenStr, err = ps.walk(ifTokens[5]) - } - } - if err != nil { - return buf.String(), err - } - buf.WriteString(thenStr) - case tokens[q][0][len(tokens[q][0])-1] == 'd': // Fallthrough for printing - fallthrough - case tokens[q][0][len(tokens[q][0])-1] == 'o': // digits. - fallthrough - case tokens[q][0][len(tokens[q][0])-1] == 'x': - fallthrough - case tokens[q][0][len(tokens[q][0])-1] == 'X': - fallthrough - case tokens[q][0][len(tokens[q][0])-1] == 's': - token := tokens[q][0] - // Remove the : that comes before a flag. 
- if token[1] == ':' { - token = token[:1] + token[2:] - } - digit, err := ps.st.pop() - if err != nil { - return buf.String(), err - } - // The rest is determined like the normal formatted prints. - digitStr := fmt.Sprintf(token, digit.(int)) - buf.WriteString(digitStr) - case tokens[q][0][:2] == "%i": - // Increment the parameters by one. - if len(ps.parameters) < 2 { - return buf.String(), errors.New("Not enough parameters to increment.") - } - val1, val2 := ps.parameters[0].(int), ps.parameters[1].(int) - val1++ - val2++ - ps.parameters[0], ps.parameters[1] = val1, val2 - default: - // The rest of the tokens is a special case, where two values are - // popped and then operated on by the token that comes after them. - op1, err := ps.st.pop() - if err != nil { - return buf.String(), err - } - op2, err := ps.st.pop() - if err != nil { - return buf.String(), err - } - var result stacker - switch tokens[q][0][:2] { - case "%+": - // Addition - result = op2.(int) + op1.(int) - case "%-": - // Subtraction - result = op2.(int) - op1.(int) - case "%*": - // Multiplication - result = op2.(int) * op1.(int) - case "%/": - // Division - result = op2.(int) / op1.(int) - case "%m": - // Modulo - result = op2.(int) % op1.(int) - case "%&": - // Bitwise AND - result = op2.(int) & op1.(int) - case "%|": - // Bitwise OR - result = op2.(int) | op1.(int) - case "%^": - // Bitwise XOR - result = op2.(int) ^ op1.(int) - case "%=": - // Equals - result = op2 == op1 - case "%>": - // Greater-than - result = op2.(int) > op1.(int) - case "%<": - // Lesser-than - result = op2.(int) < op1.(int) - case "%A": - // Logical AND - result = op2.(bool) && op1.(bool) - case "%O": - // Logical OR - result = op2.(bool) || op1.(bool) - case "%!": - // Logical complement - result = !op1.(bool) - case "%~": - // Bitwise complement - result = ^(op1.(int)) - } - ps.st.push(result) - } - - i = indices[q][1] - 1 - q++ - } else { - // We are not "inside" a token, so just skip until the end or the next - // token, and add all characters to the buffer. - j := i - if q != len(indices) { - for !(j >= indices[q][0] && j < indices[q][1]) { - j++ - } - } else { - j = len(attr) - } - buf.WriteString(string(attr[i:j])) - i = j - } - } - // Return the buffer as a string. - return buf.String(), nil -} - -// Push a stacker-value onto the stack. -func (st *stack) push(s stacker) { - *st = append(*st, s) -} - -// Pop a stacker-value from the stack. -func (st *stack) pop() (stacker, error) { - if len(*st) == 0 { - return nil, errors.New("Stack is empty.") - } - newStack := make(stack, len(*st)-1) - val := (*st)[len(*st)-1] - copy(newStack, (*st)[:len(*st)-1]) - *st = newStack - return val, nil -} - -// Initialize regexes and the static vars (that don't get changed between -// calls. -func init() { - // Initialize the main regex. - expStr := strings.Join(exp[:], "|") - regex, _ = regexp.Compile(expStr) - // Initialize the static variables. - staticVar = make(map[byte]stacker, 26) -} diff --git a/src/cmd/linuxkit/vendor/github.com/Nvveen/Gotty/types.go b/src/cmd/linuxkit/vendor/github.com/Nvveen/Gotty/types.go deleted file mode 100644 index 9bcc65e9b..000000000 --- a/src/cmd/linuxkit/vendor/github.com/Nvveen/Gotty/types.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2012 Neal van Veen. All rights reserved. -// Usage of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -package gotty - -type TermInfo struct { - boolAttributes map[string]bool - numAttributes map[string]int16 - strAttributes map[string]string - // The various names of the TermInfo file. - Names []string -} - -type stacker interface { -} -type stack []stacker - -type parser struct { - st stack - parameters []stacker - dynamicVar map[byte]stacker -} diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/containerd/README.md b/src/cmd/linuxkit/vendor/github.com/containerd/containerd/README.md index 7a2357fa9..2323f26f6 100644 --- a/src/cmd/linuxkit/vendor/github.com/containerd/containerd/README.md +++ b/src/cmd/linuxkit/vendor/github.com/containerd/containerd/README.md @@ -1,7 +1,8 @@ -![banner](/docs/images/containerd-dark.png?raw=true) +![containerd banner](https://raw.githubusercontent.com/cncf/artwork/master/projects/containerd/horizontal/color/containerd-horizontal-color.png) [![GoDoc](https://godoc.org/github.com/containerd/containerd?status.svg)](https://godoc.org/github.com/containerd/containerd) [![Build Status](https://travis-ci.org/containerd/containerd.svg?branch=master)](https://travis-ci.org/containerd/containerd) +[![Windows Build Status](https://ci.appveyor.com/api/projects/status/github/containerd/containerd?branch=master&svg=true)](https://ci.appveyor.com/project/mlaventure/containerd-3g73f?branch=master) [![FOSSA Status](https://app.fossa.io/api/projects/git%2Bhttps%3A%2F%2Fgithub.com%2Fcontainerd%2Fcontainerd.svg?type=shield)](https://app.fossa.io/projects/git%2Bhttps%3A%2F%2Fgithub.com%2Fcontainerd%2Fcontainerd?ref=badge_shield) [![Go Report Card](https://goreportcard.com/badge/github.com/containerd/containerd)](https://goreportcard.com/report/github.com/containerd/containerd) [![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/1271/badge)](https://bestpractices.coreinfrastructure.org/projects/1271) @@ -166,16 +167,14 @@ If you have [criu](https://criu.org/Main_Page) installed on your machine you can ```go // checkpoint the task then push it to a registry -checkpoint, err := task.Checkpoint(context, containerd.WithExit) +checkpoint, err := task.Checkpoint(context) err := client.Push(context, "myregistry/checkpoints/redis:master", checkpoint) // on a new machine pull the checkpoint and restore the redis container -image, err := client.Pull(context, "myregistry/checkpoints/redis:master") +checkpoint, err := client.Pull(context, "myregistry/checkpoints/redis:master") -checkpoint := image.Target() - -redis, err = client.NewContainer(context, "redis-master", containerd.WithCheckpoint(checkpoint, "redis-rootfs")) +redis, err = client.NewContainer(context, "redis-master", containerd.WithNewSnapshot("redis-rootfs", checkpoint)) defer container.Delete(context) task, err = redis.NewTask(context, cio.Stdio, containerd.WithTaskCheckpoint(checkpoint)) @@ -184,16 +183,33 @@ defer task.Delete(context) err := task.Start(context) ``` +### Snapshot Plugins + +In addition to the built-in Snapshot plugins in containerd, additional external +plugins can be configured using GRPC. An external plugin is made available using +the configured name and appears as a plugin alongside the built-in ones. + +To add an external snapshot plugin, add the plugin to containerd's config file +(by default at `/etc/containerd/config.toml`). The string following +`proxy_plugin.` will be used as the name of the snapshotter and the address +should refer to a socket with a GRPC listener serving containerd's Snapshot +GRPC API. 
Remember to restart containerd for any configuration changes to take +effect. + +``` +[proxy_plugins] + [proxy_plugins.customsnapshot] + type = "snapshot" + address = "/var/run/mysnapshotter.sock" +``` + +See [PLUGINS.md](PLUGINS.md) for how to create plugins + ### Releases and API Stability Please see [RELEASES.md](RELEASES.md) for details on versioning and stability of containerd components. -### Development reports. - -Weekly summary on the progress and what is being worked on. -https://github.com/containerd/containerd/tree/master/reports - ### Communication For async communication and long running discussions please use issues and pull requests on the github repo. @@ -201,7 +217,12 @@ This will be the best place to discuss design and implementation. For sync communication we have a community slack with a #containerd channel that everyone is welcome to join and chat about development. -**Slack:** https://dockr.ly/community +**Slack:** Catch us in the #containerd and #containerd-dev channels on dockercommunity.slack.com. +[Click here for an invite to docker community slack.](https://dockr.ly/slack) + +### Security audit + +A third party security audit was performed by Cure53 in 4Q2018; the [full report](docs/SECURITY_AUDIT.pdf) is available in our docs/ directory. ### Reporting security issues @@ -213,3 +234,21 @@ The containerd codebase is released under the [Apache 2.0 license](LICENSE.code) The README.md file, and files in the "docs" folder are licensed under the Creative Commons Attribution 4.0 International License. You may obtain a copy of the license, titled CC-BY-4.0, at http://creativecommons.org/licenses/by/4.0/. + +## Project details + +**containerd** is the primary open source project within the broader containerd GitHub repository. +However, all projects within the repo have common maintainership, governance, and contributing +guidelines which are stored in a `project` repository commonly for all containerd projects. + +Please find all these core project documents, including the: + * [Project governance](https://github.com/containerd/project/blob/master/GOVERNANCE.md), + * [Maintainers](https://github.com/containerd/project/blob/master/MAINTAINERS), + * and [Contributing guidelines](https://github.com/containerd/project/blob/master/CONTRIBUTING.md) + +information in our [`containerd/project`](https://github.com/containerd/project) repository. + +## Adoption + +Interested to see who is using containerd? Are you using containerd in a project? +Please add yourself via pull request to our [ADOPTERS.md](./ADOPTERS.md) file. diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/containerd/errdefs/errors.go b/src/cmd/linuxkit/vendor/github.com/containerd/containerd/errdefs/errors.go new file mode 100644 index 000000000..b5200afc0 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/containerd/containerd/errdefs/errors.go @@ -0,0 +1,93 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +// Package errdefs defines the common errors used throughout containerd +// packages. +// +// Use with errors.Wrap and errors.Wrapf to add context to an error. +// +// To detect an error class, use the IsXXX functions to tell whether an error +// is of a certain type. +// +// The functions ToGRPC and FromGRPC can be used to map server-side and +// client-side errors to the correct types. +package errdefs + +import ( + "context" + + "github.com/pkg/errors" +) + +// Definitions of common error types used throughout containerd. All containerd +// errors returned by most packages will map into one of these error classes. +// Packages should return errors of these types when they want to instruct a +// client to take a particular action. +// +// For the most part, we just try to provide local grpc errors. Most conditions +// map very well to those defined by grpc. +var ( + ErrUnknown = errors.New("unknown") // used internally to represent a missed mapping. + ErrInvalidArgument = errors.New("invalid argument") + ErrNotFound = errors.New("not found") + ErrAlreadyExists = errors.New("already exists") + ErrFailedPrecondition = errors.New("failed precondition") + ErrUnavailable = errors.New("unavailable") + ErrNotImplemented = errors.New("not implemented") // represents not supported and unimplemented +) + +// IsInvalidArgument returns true if the error is due to an invalid argument +func IsInvalidArgument(err error) bool { + return errors.Cause(err) == ErrInvalidArgument +} + +// IsNotFound returns true if the error is due to a missing object +func IsNotFound(err error) bool { + return errors.Cause(err) == ErrNotFound +} + +// IsAlreadyExists returns true if the error is due to an already existing +// metadata item +func IsAlreadyExists(err error) bool { + return errors.Cause(err) == ErrAlreadyExists +} + +// IsFailedPrecondition returns true if an operation could not proceed due to +// the lack of a particular condition +func IsFailedPrecondition(err error) bool { + return errors.Cause(err) == ErrFailedPrecondition +} + +// IsUnavailable returns true if the error is due to a resource being unavailable +func IsUnavailable(err error) bool { + return errors.Cause(err) == ErrUnavailable +} + +// IsNotImplemented returns true if the error is due to not being implemented +func IsNotImplemented(err error) bool { + return errors.Cause(err) == ErrNotImplemented +} + +// IsCanceled returns true if the error is due to `context.Canceled`. +func IsCanceled(err error) bool { + return errors.Cause(err) == context.Canceled +} + +// IsDeadlineExceeded returns true if the error is due to +// `context.DeadlineExceeded`. +func IsDeadlineExceeded(err error) bool { + return errors.Cause(err) == context.DeadlineExceeded +} diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/containerd/errdefs/grpc.go b/src/cmd/linuxkit/vendor/github.com/containerd/containerd/errdefs/grpc.go new file mode 100644 index 000000000..209f63bd0 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/containerd/containerd/errdefs/grpc.go @@ -0,0 +1,147 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and + limitations under the License. +*/ + +package errdefs + +import ( + "context" + "strings" + + "github.com/pkg/errors" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// ToGRPC will attempt to map the backend containerd error into a grpc error, +// using the original error message as a description. +// +// Further information may be extracted from certain errors depending on their +// type. +// +// If the error is unmapped, the original error will be returned to be handled +// by the regular grpc error handling stack. +func ToGRPC(err error) error { + if err == nil { + return nil + } + + if isGRPCError(err) { + // error has already been mapped to grpc + return err + } + + switch { + case IsInvalidArgument(err): + return status.Errorf(codes.InvalidArgument, err.Error()) + case IsNotFound(err): + return status.Errorf(codes.NotFound, err.Error()) + case IsAlreadyExists(err): + return status.Errorf(codes.AlreadyExists, err.Error()) + case IsFailedPrecondition(err): + return status.Errorf(codes.FailedPrecondition, err.Error()) + case IsUnavailable(err): + return status.Errorf(codes.Unavailable, err.Error()) + case IsNotImplemented(err): + return status.Errorf(codes.Unimplemented, err.Error()) + case IsCanceled(err): + return status.Errorf(codes.Canceled, err.Error()) + case IsDeadlineExceeded(err): + return status.Errorf(codes.DeadlineExceeded, err.Error()) + } + + return err +} + +// ToGRPCf maps the error to grpc error codes, assembling the formatting string +// and combining it with the target error string. +// +// This is equivalent to errors.ToGRPC(errors.Wrapf(err, format, args...)) +func ToGRPCf(err error, format string, args ...interface{}) error { + return ToGRPC(errors.Wrapf(err, format, args...)) +} + +// FromGRPC returns the underlying error from a grpc service based on the grpc error code +func FromGRPC(err error) error { + if err == nil { + return nil + } + + var cls error // divide these into error classes, becomes the cause + + switch code(err) { + case codes.InvalidArgument: + cls = ErrInvalidArgument + case codes.AlreadyExists: + cls = ErrAlreadyExists + case codes.NotFound: + cls = ErrNotFound + case codes.Unavailable: + cls = ErrUnavailable + case codes.FailedPrecondition: + cls = ErrFailedPrecondition + case codes.Unimplemented: + cls = ErrNotImplemented + case codes.Canceled: + cls = context.Canceled + case codes.DeadlineExceeded: + cls = context.DeadlineExceeded + default: + cls = ErrUnknown + } + + msg := rebaseMessage(cls, err) + if msg != "" { + err = errors.Wrap(cls, msg) + } else { + err = errors.WithStack(cls) + } + + return err +} + +// rebaseMessage removes the repeats for an error at the end of an error +// string. This will happen when taking an error over grpc then remapping it. +// +// Effectively, we just remove the string of cls from the end of err if it +// appears there. 
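+// +// Illustration (editor's sketch, not upstream containerd text): together with +// ToGRPC and FromGRPC above, the round trip preserves both the error class +// and the message: +// +// err := errors.Wrapf(ErrNotFound, "image %q", "foo") // "image \"foo\": not found" +// err = FromGRPC(ToGRPC(err)) +// IsNotFound(err) // true, and the message is unchanged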
+func rebaseMessage(cls error, err error) string { + desc := errDesc(err) + clss := cls.Error() + if desc == clss { + return "" + } + + return strings.TrimSuffix(desc, ": "+clss) +} + +func isGRPCError(err error) bool { + _, ok := status.FromError(err) + return ok +} + +func code(err error) codes.Code { + if s, ok := status.FromError(err); ok { + return s.Code() + } + return codes.Unknown +} + +func errDesc(err error) string { + if s, ok := status.FromError(err); ok { + return s.Message() + } + return err.Error() +} diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/containerd/log/context.go b/src/cmd/linuxkit/vendor/github.com/containerd/containerd/log/context.go new file mode 100644 index 000000000..31f1a3ac0 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/containerd/containerd/log/context.go @@ -0,0 +1,90 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package log + +import ( + "context" + "sync/atomic" + + "github.com/sirupsen/logrus" +) + +var ( + // G is an alias for GetLogger. + // + // We may want to define this locally to a package to get package tagged log + // messages. + G = GetLogger + + // L is an alias for the standard logger. + L = logrus.NewEntry(logrus.StandardLogger()) +) + +type ( + loggerKey struct{} +) + +// TraceLevel is the log level for tracing. Trace level is lower than debug level, +// and is usually used to trace detailed behavior of the program. +const TraceLevel = logrus.Level(uint32(logrus.DebugLevel + 1)) + +// RFC3339NanoFixed is time.RFC3339Nano with nanoseconds padded using zeros to +// ensure the formatted time is always the same number of characters. +const RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00" + +// ParseLevel takes a string level and returns the Logrus log level constant. +// It supports trace level. +func ParseLevel(lvl string) (logrus.Level, error) { + if lvl == "trace" { + return TraceLevel, nil + } + return logrus.ParseLevel(lvl) +} + +// WithLogger returns a new context with the provided logger. Use in +// combination with logger.WithField(s) for great effect. +func WithLogger(ctx context.Context, logger *logrus.Entry) context.Context { + return context.WithValue(ctx, loggerKey{}, logger) +} + +// GetLogger retrieves the current logger from the context. If no logger is +// available, the default logger is returned. +func GetLogger(ctx context.Context) *logrus.Entry { + logger := ctx.Value(loggerKey{}) + + if logger == nil { + return L + } + + return logger.(*logrus.Entry) +} + +// Trace logs a message at level Trace with the log entry passed-in. +func Trace(e *logrus.Entry, args ...interface{}) { + level := logrus.Level(atomic.LoadUint32((*uint32)(&e.Logger.Level))) + if level >= TraceLevel { + e.Debug(args...) + } +} + +// Tracef logs a message at level Trace with the log entry passed-in. +func Tracef(e *logrus.Entry, format string, args ...interface{}) { + level := logrus.Level(atomic.LoadUint32((*uint32)(&e.Logger.Level))) + if level >= TraceLevel { + e.Debugf(format, args...) 
+ } +} diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/containerd/platforms/compare.go b/src/cmd/linuxkit/vendor/github.com/containerd/containerd/platforms/compare.go new file mode 100644 index 000000000..3ad22a10d --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/containerd/containerd/platforms/compare.go @@ -0,0 +1,229 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package platforms + +import specs "github.com/opencontainers/image-spec/specs-go/v1" + +// MatchComparer is able to match and compare platforms to +// filter and sort platforms. +type MatchComparer interface { + Matcher + + Less(specs.Platform, specs.Platform) bool +} + +// Only returns a match comparer for a single platform +// using default resolution logic for the platform. +// +// For ARMv8, will also match ARMv7, ARMv6 and ARMv5 (for 32bit runtimes) +// For ARMv7, will also match ARMv6 and ARMv5 +// For ARMv6, will also match ARMv5 +func Only(platform specs.Platform) MatchComparer { + platform = Normalize(platform) + if platform.Architecture == "arm" { + if platform.Variant == "v8" { + return orderedPlatformComparer{ + matchers: []Matcher{ + &matcher{ + Platform: platform, + }, + &matcher{ + Platform: specs.Platform{ + Architecture: platform.Architecture, + OS: platform.OS, + OSVersion: platform.OSVersion, + OSFeatures: platform.OSFeatures, + Variant: "v7", + }, + }, + &matcher{ + Platform: specs.Platform{ + Architecture: platform.Architecture, + OS: platform.OS, + OSVersion: platform.OSVersion, + OSFeatures: platform.OSFeatures, + Variant: "v6", + }, + }, + &matcher{ + Platform: specs.Platform{ + Architecture: platform.Architecture, + OS: platform.OS, + OSVersion: platform.OSVersion, + OSFeatures: platform.OSFeatures, + Variant: "v5", + }, + }, + }, + } + } + if platform.Variant == "v7" { + return orderedPlatformComparer{ + matchers: []Matcher{ + &matcher{ + Platform: platform, + }, + &matcher{ + Platform: specs.Platform{ + Architecture: platform.Architecture, + OS: platform.OS, + OSVersion: platform.OSVersion, + OSFeatures: platform.OSFeatures, + Variant: "v6", + }, + }, + &matcher{ + Platform: specs.Platform{ + Architecture: platform.Architecture, + OS: platform.OS, + OSVersion: platform.OSVersion, + OSFeatures: platform.OSFeatures, + Variant: "v5", + }, + }, + }, + } + } + if platform.Variant == "v6" { + return orderedPlatformComparer{ + matchers: []Matcher{ + &matcher{ + Platform: platform, + }, + &matcher{ + Platform: specs.Platform{ + Architecture: platform.Architecture, + OS: platform.OS, + OSVersion: platform.OSVersion, + OSFeatures: platform.OSFeatures, + Variant: "v5", + }, + }, + }, + } + } + } + + return singlePlatformComparer{ + Matcher: &matcher{ + Platform: platform, + }, + } +} + +// Ordered returns a platform MatchComparer which matches any of the platforms +// but orders them in order they are provided. 
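+// +// Editor's illustrative sketch (the manifests slice is hypothetical, not +// upstream code): prefer linux/arm64 but accept linux/amd64, then sort +// candidates by preference: +// +// m := Ordered( +// specs.Platform{OS: "linux", Architecture: "arm64"}, +// specs.Platform{OS: "linux", Architecture: "amd64"}, +// ) +// sort.SliceStable(manifests, func(i, j int) bool { +// return m.Less(manifests[i].Platform, manifests[j].Platform) +// })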
+func Ordered(platforms ...specs.Platform) MatchComparer { + matchers := make([]Matcher, len(platforms)) + for i := range platforms { + matchers[i] = NewMatcher(platforms[i]) + } + return orderedPlatformComparer{ + matchers: matchers, + } +} + +// Any returns a platform MatchComparer which matches any of the platforms +// with no preference for ordering. +func Any(platforms ...specs.Platform) MatchComparer { + matchers := make([]Matcher, len(platforms)) + for i := range platforms { + matchers[i] = NewMatcher(platforms[i]) + } + return anyPlatformComparer{ + matchers: matchers, + } +} + +// All is a platform MatchComparer which matches all platforms +// with preference for ordering. +var All MatchComparer = allPlatformComparer{} + +type singlePlatformComparer struct { + Matcher +} + +func (c singlePlatformComparer) Less(p1, p2 specs.Platform) bool { + return c.Match(p1) && !c.Match(p2) +} + +type orderedPlatformComparer struct { + matchers []Matcher +} + +func (c orderedPlatformComparer) Match(platform specs.Platform) bool { + for _, m := range c.matchers { + if m.Match(platform) { + return true + } + } + return false +} + +func (c orderedPlatformComparer) Less(p1 specs.Platform, p2 specs.Platform) bool { + for _, m := range c.matchers { + p1m := m.Match(p1) + p2m := m.Match(p2) + if p1m && !p2m { + return true + } + if p1m || p2m { + return false + } + } + return false +} + +type anyPlatformComparer struct { + matchers []Matcher +} + +func (c anyPlatformComparer) Match(platform specs.Platform) bool { + for _, m := range c.matchers { + if m.Match(platform) { + return true + } + } + return false +} + +func (c anyPlatformComparer) Less(p1, p2 specs.Platform) bool { + var p1m, p2m bool + for _, m := range c.matchers { + if !p1m && m.Match(p1) { + p1m = true + } + if !p2m && m.Match(p2) { + p2m = true + } + if p1m && p2m { + return false + } + } + // If one matches and the other does not, sort the match first + return p1m && !p2m +} + +type allPlatformComparer struct{} + +func (allPlatformComparer) Match(specs.Platform) bool { + return true +} + +func (allPlatformComparer) Less(specs.Platform, specs.Platform) bool { + return false +} diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/containerd/platforms/cpuinfo.go b/src/cmd/linuxkit/vendor/github.com/containerd/containerd/platforms/cpuinfo.go new file mode 100644 index 000000000..69b336d67 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/containerd/containerd/platforms/cpuinfo.go @@ -0,0 +1,117 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package platforms + +import ( + "bufio" + "os" + "runtime" + "strings" + + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/log" + "github.com/pkg/errors" +) + +// Present the ARM instruction set architecture, e.g. v7, v8 +var cpuVariant string + +func init() { + if isArmArch(runtime.GOARCH) { + cpuVariant = getCPUVariant() + } else { + cpuVariant = "" + } +} + +// For Linux, the kernel has already detected the ABI, ISA and Features.
+// So we don't need to access the ARM registers to detect platform information +// by ourselves. We can just parse this information from /proc/cpuinfo +func getCPUInfo(pattern string) (info string, err error) { + if !isLinuxOS(runtime.GOOS) { + return "", errors.Wrapf(errdefs.ErrNotImplemented, "getCPUInfo for OS %s", runtime.GOOS) + } + + cpuinfo, err := os.Open("/proc/cpuinfo") + if err != nil { + return "", err + } + defer cpuinfo.Close() + + // Parse /proc/cpuinfo line by line. For SMP SoCs, parsing + // the first core is enough. + scanner := bufio.NewScanner(cpuinfo) + for scanner.Scan() { + newline := scanner.Text() + list := strings.Split(newline, ":") + + if len(list) > 1 && strings.EqualFold(strings.TrimSpace(list[0]), pattern) { + return strings.TrimSpace(list[1]), nil + } + } + + // Check whether the scanner encountered errors + err = scanner.Err() + if err != nil { + return "", err + } + + return "", errors.Wrapf(errdefs.ErrNotFound, "getCPUInfo for pattern: %s", pattern) +} + +func getCPUVariant() string { + if runtime.GOOS == "windows" { + // Windows only supports v7 for ARM32 and v8 for ARM64 and so we can use + // runtime.GOARCH to determine the variants + var variant string + switch runtime.GOARCH { + case "arm64": + variant = "v8" + case "arm": + variant = "v7" + default: + variant = "unknown" + } + + return variant + } + + variant, err := getCPUInfo("Cpu architecture") + if err != nil { + log.L.WithError(err).Error("failure getting variant") + return "" + } + + switch variant { + case "8", "AArch64": + variant = "v8" + case "7", "7M", "?(12)", "?(13)", "?(14)", "?(15)", "?(16)", "?(17)": + variant = "v7" + case "6", "6TEJ": + variant = "v6" + case "5", "5T", "5TE", "5TEJ": + variant = "v5" + case "4", "4T": + variant = "v4" + case "3": + variant = "v3" + default: + variant = "unknown" + } + + return variant +} diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/containerd/platforms/database.go b/src/cmd/linuxkit/vendor/github.com/containerd/containerd/platforms/database.go new file mode 100644 index 000000000..6ede94061 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/containerd/containerd/platforms/database.go @@ -0,0 +1,114 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package platforms + +import ( + "runtime" + "strings" +) + +// isLinuxOS returns true if the operating system is Linux. +// +// The OS value should be normalized before calling this function. +func isLinuxOS(os string) bool { + return os == "linux" +} + +// These functions are generated from https://golang.org/src/go/build/syslist.go. +// +// We use switch statements because they are slightly faster than map lookups +// and use a little less memory. + +// isKnownOS returns true if we know about the operating system. +// +// The OS value should be normalized before calling this function.
+func isKnownOS(os string) bool { + switch os { + case "aix", "android", "darwin", "dragonfly", "freebsd", "hurd", "illumos", "js", "linux", "nacl", "netbsd", "openbsd", "plan9", "solaris", "windows", "zos": + return true + } + return false +} + +// isArmArch returns true if the architecture is ARM. +// +// The arch value should be normalized before being passed to this function. +func isArmArch(arch string) bool { + switch arch { + case "arm", "arm64": + return true + } + return false +} + +// isKnownArch returns true if we know about the architecture. +// +// The arch value should be normalized before being passed to this function. +func isKnownArch(arch string) bool { + switch arch { + case "386", "amd64", "amd64p32", "arm", "armbe", "arm64", "arm64be", "ppc64", "ppc64le", "mips", "mipsle", "mips64", "mips64le", "mips64p32", "mips64p32le", "ppc", "riscv", "riscv64", "s390", "s390x", "sparc", "sparc64", "wasm": + return true + } + return false +} + +func normalizeOS(os string) string { + if os == "" { + return runtime.GOOS + } + os = strings.ToLower(os) + + switch os { + case "macos": + os = "darwin" + } + return os +} + +// normalizeArch normalizes the architecture. +func normalizeArch(arch, variant string) (string, string) { + arch, variant = strings.ToLower(arch), strings.ToLower(variant) + switch arch { + case "i386": + arch = "386" + variant = "" + case "x86_64", "x86-64": + arch = "amd64" + variant = "" + case "aarch64", "arm64": + arch = "arm64" + switch variant { + case "8", "v8": + variant = "" + } + case "armhf": + arch = "arm" + variant = "v7" + case "armel": + arch = "arm" + variant = "v6" + case "arm": + switch variant { + case "", "7": + variant = "v7" + case "5", "6", "8": + variant = "v" + variant + } + } + + return arch, variant +} diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/containerd/platforms/defaults.go b/src/cmd/linuxkit/vendor/github.com/containerd/containerd/platforms/defaults.go new file mode 100644 index 000000000..a14d80e58 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/containerd/containerd/platforms/defaults.go @@ -0,0 +1,38 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package platforms + +import ( + "runtime" + + specs "github.com/opencontainers/image-spec/specs-go/v1" +) + +// DefaultString returns the default string specifier for the platform. +func DefaultString() string { + return Format(DefaultSpec()) +} + +// DefaultSpec returns the current platform's default platform specification. +func DefaultSpec() specs.Platform { + return specs.Platform{ + OS: runtime.GOOS, + Architecture: runtime.GOARCH, + // The Variant field will be empty if arch != ARM. 
+ Variant: cpuVariant, + } +} diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/containerd/platforms/defaults_unix.go b/src/cmd/linuxkit/vendor/github.com/containerd/containerd/platforms/defaults_unix.go new file mode 100644 index 000000000..e8a7d5ffa --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/containerd/containerd/platforms/defaults_unix.go @@ -0,0 +1,24 @@ +// +build !windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package platforms + +// Default returns the default matcher for the platform. +func Default() MatchComparer { + return Only(DefaultSpec()) +} diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/containerd/platforms/defaults_windows.go b/src/cmd/linuxkit/vendor/github.com/containerd/containerd/platforms/defaults_windows.go new file mode 100644 index 000000000..0defbd36c --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/containerd/containerd/platforms/defaults_windows.go @@ -0,0 +1,31 @@ +// +build windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package platforms + +import ( + specs "github.com/opencontainers/image-spec/specs-go/v1" +) + +// Default returns the default matcher for the platform. +func Default() MatchComparer { + return Ordered(DefaultSpec(), specs.Platform{ + OS: "linux", + Architecture: "amd64", + }) +} diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/containerd/platforms/platforms.go b/src/cmd/linuxkit/vendor/github.com/containerd/containerd/platforms/platforms.go new file mode 100644 index 000000000..77d3f184e --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/containerd/containerd/platforms/platforms.go @@ -0,0 +1,278 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +// Package platforms provides a toolkit for normalizing, matching and +// specifying container platforms. +// +// Centered around OCI platform specifications, we define a string-based +// specifier syntax that can be used for user input. 
With a specifier, users
+// only need to specify the parts of the platform that are relevant to their
+// context, providing an operating system or architecture or both.
+//
+// How do I use this package?
+//
+// The vast majority of use cases should simply use the match function with
+// user input. The first step is to parse a specifier into a matcher:
+//
+//   m, err := Parse("linux")
+//   if err != nil { ... }
+//
+// Once you have a matcher, use it to match against the platform declared by a
+// component, typically from an image or runtime. Since extracting an image's
+// platform is a little more involved, we'll use an example against the
+// platform default:
+//
+//   if ok := m.Match(DefaultSpec()); !ok { /* doesn't match */ }
+//
+// This can be composed in loops for resolving runtimes or used as a filter
+// for fetching and selecting images.
+//
+// More details of the specifier syntax and platform spec follow.
+//
+// Declaring Platform Support
+//
+// Components that have strict platform requirements should use the OCI
+// platform specification to declare their support. Typically, these will be
+// images and runtimes, which should declare specifically which platforms they
+// support. This looks roughly as follows:
+//
+//   type Platform struct {
+//      Architecture string
+//      OS           string
+//      Variant      string
+//   }
+//
+// Most images and runtimes should at least set Architecture and OS, according
+// to their GOARCH and GOOS values, respectively (follow the OCI image
+// specification when in doubt). ARM should set the variant under certain
+// conditions, which are outlined below.
+//
+// Platform Specifiers
+//
+// While the OCI platform specifications provide a tool for components to
+// specify structured information, user input typically doesn't need the full
+// context and much can be inferred. To solve this problem, we introduced
+// "specifiers". A specifier has the format
+// `<os>|<arch>|<os>/<arch>[/<variant>]`. The user can provide either the
+// operating system or the architecture or both.
+//
+// An example of a common specifier is `linux/amd64`. If the host has a default
+// runtime that matches this, the user can simply provide the component that
+// matters. For example, if an image provides amd64 and arm64 support, the
+// operating system, `linux`, can be inferred, so they only have to provide
+// `arm64` or `amd64`. Similar behavior is implemented for operating systems,
+// where the architecture may be known but a runtime may support images from
+// different operating systems.
+//
+// Normalization
+//
+// Because not all users are familiar with the way the Go runtime represents
+// platforms, several normalizations have been provided to make this package
+// easier to use.
+//
+// The following are performed for architectures:
+//
+//   Value    Normalized
+//   aarch64  arm64
+//   armhf    arm
+//   armel    arm/v6
+//   i386     386
+//   x86_64   amd64
+//   x86-64   amd64
+//
+// We also normalize the operating system `macos` to `darwin`.
+//
+// ARM Support
+//
+// To qualify ARM architecture, the Variant field is used to qualify the arm
+// version. The most common arm version, v7, is represented without the variant
+// unless it is explicitly provided. This is treated as equivalent to armhf. A
+// previous architecture, armel, will be normalized to arm/v6.
+//
+// While these normalizations are provided, their support on arm platforms has
+// not yet been fully implemented and tested.
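For orientation, here is a minimal editorial sketch (not part of the vendored file) of how the specifier workflow documented above fits together. It assumes the vendored import paths used throughout this patch and wraps the calls in a hypothetical main package:

```go
package main

import (
	"fmt"

	"github.com/containerd/containerd/platforms"
	specs "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	// Parse a bare OS specifier; the architecture is inferred from the
	// local runtime (GOARCH), as described in the package docs.
	spec, err := platforms.Parse("linux")
	if err != nil {
		panic(err)
	}
	m := platforms.NewMatcher(spec)

	// Match against a fully specified platform, e.g. one taken from an
	// image index entry. Prints true on an amd64 host.
	fmt.Println(m.Match(specs.Platform{OS: "linux", Architecture: "amd64"}))

	// Normalization folds aliases onto canonical values:
	// "macos"/"x86_64" becomes "darwin/amd64".
	fmt.Println(platforms.Format(platforms.Normalize(specs.Platform{
		OS:           "macos",
		Architecture: "x86_64",
	})))
}
```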
+package platforms
+
+import (
+	"regexp"
+	"runtime"
+	"strconv"
+	"strings"
+
+	"github.com/containerd/containerd/errdefs"
+	specs "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+)
+
+var (
+	specifierRe = regexp.MustCompile(`^[A-Za-z0-9_-]+$`)
+)
+
+// Matcher matches platform specifications, provided by an image or runtime.
+type Matcher interface {
+	Match(platform specs.Platform) bool
+}
+
+// NewMatcher returns a simple matcher based on the provided platform
+// specification. The returned matcher only looks for equality based on os,
+// architecture and variant.
+//
+// One may implement their own matcher if this doesn't provide the required
+// functionality.
+//
+// Applications should opt to use `Match` over directly parsing specifiers.
+func NewMatcher(platform specs.Platform) Matcher {
+	return &matcher{
+		Platform: Normalize(platform),
+	}
+}
+
+type matcher struct {
+	specs.Platform
+}
+
+func (m *matcher) Match(platform specs.Platform) bool {
+	normalized := Normalize(platform)
+	return m.OS == normalized.OS &&
+		m.Architecture == normalized.Architecture &&
+		m.Variant == normalized.Variant
+}
+
+func (m *matcher) String() string {
+	return Format(m.Platform)
+}
+
+// Parse parses the platform specifier syntax into a platform declaration.
+//
+// Platform specifiers are in the format `<os>|<arch>|<os>/<arch>[/<variant>]`.
+// The minimum required information for a platform specifier is the operating
+// system or architecture. If there is only a single string (no slashes), the
+// value will be matched against the known set of operating systems, then fall
+// back to the known set of architectures. The missing component will be
+// inferred based on the local environment.
+func Parse(specifier string) (specs.Platform, error) {
+	if strings.Contains(specifier, "*") {
+		// TODO(stevvooe): need to work out exact wildcard handling
+		return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q: wildcards not yet supported", specifier)
+	}
+
+	parts := strings.Split(specifier, "/")
+
+	for _, part := range parts {
+		if !specifierRe.MatchString(part) {
+			return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q is an invalid component of %q: platform specifier component must match %q", part, specifier, specifierRe.String())
+		}
+	}
+
+	var p specs.Platform
+	switch len(parts) {
+	case 1:
+		// in this case, we will test that the value might be an OS, then look
+		// it up. If it is not known, we'll treat it as an architecture. Since
+		// we have very little information about the platform here, we are
+		// going to be a little more strict if we don't know about the argument
+		// value.
+		p.OS = normalizeOS(parts[0])
+		if isKnownOS(p.OS) {
+			// picks a default architecture
+			p.Architecture = runtime.GOARCH
+			if p.Architecture == "arm" && cpuVariant != "v7" {
+				p.Variant = cpuVariant
+			}
+
+			return p, nil
+		}
+
+		p.Architecture, p.Variant = normalizeArch(parts[0], "")
+		if p.Architecture == "arm" && p.Variant == "v7" {
+			p.Variant = ""
+		}
+		if isKnownArch(p.Architecture) {
+			p.OS = runtime.GOOS
+			return p, nil
+		}
+
+		return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q: unknown operating system or architecture", specifier)
+	case 2:
+		// In this case, we treat it as a regular os/arch pair. We don't care
+		// about whether or not we know of the platform.
+		p.OS = normalizeOS(parts[0])
+		p.Architecture, p.Variant = normalizeArch(parts[1], "")
+		if p.Architecture == "arm" && p.Variant == "v7" {
+			p.Variant = ""
+		}
+
+		return p, nil
+	case 3:
+		// we have a fully specified variant, this is rare
+		p.OS = normalizeOS(parts[0])
+		p.Architecture, p.Variant = normalizeArch(parts[1], parts[2])
+		if p.Architecture == "arm64" && p.Variant == "" {
+			p.Variant = "v8"
+		}
+
+		return p, nil
+	}
+
+	return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q: cannot parse platform specifier", specifier)
+}
+
+// MustParse is like Parse but panics if the specifier cannot be parsed.
+// Simplifies initialization of global variables.
+func MustParse(specifier string) specs.Platform {
+	p, err := Parse(specifier)
+	if err != nil {
+		panic("platform: Parse(" + strconv.Quote(specifier) + "): " + err.Error())
+	}
+	return p
+}
+
+// Format returns a string specifier from the provided platform specification.
+func Format(platform specs.Platform) string {
+	if platform.OS == "" {
+		return "unknown"
+	}
+
+	return joinNotEmpty(platform.OS, platform.Architecture, platform.Variant)
+}
+
+func joinNotEmpty(s ...string) string {
+	var ss []string
+	for _, s := range s {
+		if s == "" {
+			continue
+		}
+
+		ss = append(ss, s)
+	}
+
+	return strings.Join(ss, "/")
+}
+
+// Normalize validates and translates the platform to the canonical value.
+//
+// For example, if "Aarch64" is encountered, we change it to "arm64" or if
+// "x86_64" is encountered, it becomes "amd64".
+func Normalize(platform specs.Platform) specs.Platform {
+	platform.OS = normalizeOS(platform.OS)
+	platform.Architecture, platform.Variant = normalizeArch(platform.Architecture, platform.Variant)
+
+	// these fields are deprecated, remove them
+	platform.OSFeatures = nil
+	platform.OSVersion = ""
+
+	return platform
+}
diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/containerd/vendor.conf b/src/cmd/linuxkit/vendor/github.com/containerd/containerd/vendor.conf
index 633f75672..79ebf3ca7 100644
--- a/src/cmd/linuxkit/vendor/github.com/containerd/containerd/vendor.conf
+++ b/src/cmd/linuxkit/vendor/github.com/containerd/containerd/vendor.conf
@@ -1,85 +1,91 @@
-github.com/containerd/go-runc bcb223a061a3dd7de1a89c0b402a60f4dd9bd307
-github.com/containerd/console 4d8a41f4ce5b9bae77c41786ea2458330f43f081
-github.com/containerd/cgroups fe281dd265766145e943a034aa41086474ea6130
-github.com/containerd/typeurl f6943554a7e7e88b3c14aad190bf05932da84788
-github.com/containerd/fifo 3d5202aec260678c48179c56f40e6f38a095738c
-github.com/containerd/btrfs 2e1aa0ddf94f91fa282b6ed87c23bf0d64911244
-github.com/containerd/continuity a60600ad77f38aaa70165825f61e2ea72e51c9b1
+github.com/containerd/go-runc e029b79d8cda8374981c64eba71f28ec38e5526f
+github.com/containerd/console 0650fd9eeb50bab4fc99dceb9f2e14cf58f36e7f
+github.com/containerd/cgroups c4b9ac5c7601384c965b9646fc515884e091ebb9
+github.com/containerd/typeurl a93fcdb778cd272c6e9b3028b2f42d813e785d40
+github.com/containerd/fifo bda0ff6ed73c67bfb5e62bc9c697f146b7fd7f13
+github.com/containerd/btrfs af5082808c833de0e79c1e72eea9fea239364877
+github.com/containerd/continuity f2a389ac0a02ce21c09edd7344677a601970f41c
 github.com/coreos/go-systemd 48702e0da86bd25e76cfef347e2adeb434a0d0a6
 github.com/docker/go-metrics 4ea375f7759c82740c893fc030bc37088d2ec098
 github.com/docker/go-events 9461782956ad83b30282bf90e31fa6a70c255ba9
-github.com/docker/go-units v0.3.1
+github.com/docker/go-units v0.4.0
 github.com/godbus/dbus c7fdd8b5cd55e87b4e1f4e372cdb1db61dd6c66f
github.com/prometheus/client_golang f4fb1b73fb099f396a7f0036bf86aa8def4ed823 github.com/prometheus/client_model 99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c github.com/prometheus/common 89604d197083d4781071d3c65855d24ecfb0a563 github.com/prometheus/procfs cb4147076ac75738c9a7d279075a253c0cc5acbd github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9 -github.com/matttproud/golang_protobuf_extensions v1.0.0 -github.com/gogo/protobuf v1.0.0 -github.com/gogo/googleapis 08a7655d27152912db7aaf4f983275eaf8d128ef -github.com/golang/protobuf 1643683e1b54a9e88ad26d98f81400c8c9d9f4f9 -github.com/opencontainers/runtime-spec v1.0.1 -github.com/opencontainers/runc 69663f0bd4b60df09991c08812a60108003fa340 -github.com/sirupsen/logrus v1.0.0 -github.com/pmezard/go-difflib v1.0.0 -github.com/urfave/cli 7bc6a0acffa589f415f88aca16cc1de5ffd66f9c -golang.org/x/net 7dcfb8076726a3fdd9353b6b8a1f1b6be6811bd6 -google.golang.org/grpc v1.10.1 -github.com/pkg/errors v0.8.0 -github.com/opencontainers/go-digest 21dfd564fd89c944783d00d069f33e3e7123c448 -golang.org/x/sys 314a259e304ff91bd6985da2a7149bbf91237993 https://github.com/golang/sys +github.com/matttproud/golang_protobuf_extensions v1.0.1 +github.com/gogo/protobuf v1.2.1 +github.com/gogo/googleapis v1.2.0 +github.com/golang/protobuf v1.2.0 +github.com/opencontainers/runtime-spec 29686dbc5559d93fb1ef402eeda3e35c38d75af4 # v1.0.1-59-g29686db +github.com/opencontainers/runc dc9208a3303feef5b3839f4323d9beb36df0a9dd # v1.0.0-rc10 +github.com/konsorten/go-windows-terminal-sequences v1.0.1 +github.com/sirupsen/logrus v1.4.1 +github.com/urfave/cli v1.22.0 +golang.org/x/net f3200d17e092c607f615320ecaad13d87ad9a2b3 +google.golang.org/grpc 6eaf6f47437a6b4e2153a190160ef39a92c7eceb # v1.23.0 +github.com/pkg/errors v0.8.1 +github.com/opencontainers/go-digest c9281466c8b2f606084ac71339773efd177436e7 +golang.org/x/sys 9eafafc0a87e0fd0aeeba439a4573537970c44c7 https://github.com/golang/sys github.com/opencontainers/image-spec v1.0.1 -golang.org/x/sync 450f422ab23cf9881c94e2db30cac0eb1b7cf80c -github.com/BurntSushi/toml a368813c5e648fee92e5f6c30e3944ff9d5e8895 +golang.org/x/sync 42b317875d0fa942474b76e1b46a6060d720ae6e +github.com/BurntSushi/toml v0.3.1 github.com/grpc-ecosystem/go-grpc-prometheus 6b7015e65d366bf3f19b2b2a000a831940f0f7e0 -github.com/Microsoft/go-winio v0.4.5 -github.com/Microsoft/hcsshim v0.6.7 -github.com/boltdb/bolt e9cf4fae01b5a8ff89d0ec6b32f0d9c9f79aefdd +github.com/Microsoft/go-winio v0.4.14 +github.com/Microsoft/hcsshim 9e921883ac929bbe515b39793ece99ce3a9d7706 google.golang.org/genproto d80a6e20e776b0b17a324d0ba1ab50a39c8e8944 golang.org/x/text 19e51611da83d6be54ddafce4a4af510cb3e9ea4 -github.com/stevvooe/ttrpc d4528379866b0ce7e9d71f3eb96f0582fc374577 -github.com/syndtr/gocapability db04d3cc01c8b54962a58ec7e491717d06cfcc16 -github.com/gotestyourself/gotestyourself 44dbf532bbf5767611f6f2a61bded572e337010a -github.com/google/go-cmp v0.1.0 +github.com/containerd/ttrpc 92c8520ef9f86600c650dd540266a007bf03670f +github.com/syndtr/gocapability d98352740cb2c55f81556b63d4a1ec64c5a319c2 +gotest.tools v2.3.0 +github.com/google/go-cmp v0.2.0 +go.etcd.io/bbolt v1.3.3 +github.com/hashicorp/errwrap v1.0.0 +github.com/hashicorp/go-multierror v1.0.0 +github.com/hashicorp/golang-lru v0.5.3 +go.opencensus.io v0.22.0 +github.com/imdario/mergo v0.3.7 +github.com/cpuguy83/go-md2man v1.0.10 +github.com/russross/blackfriday v1.5.2 # cri dependencies -github.com/containerd/cri v1.0.4 -github.com/containerd/go-cni f2d7272f12d045b16ed924f50e91f9f9cecc55a7 -github.com/blang/semver 
v3.1.0 -github.com/containernetworking/cni v0.6.0 -github.com/containernetworking/plugins v0.7.0 -github.com/davecgh/go-spew v1.1.0 -github.com/docker/distribution b38e5838b7b2f2ad48e06ec4b500011976080621 +github.com/containerd/cri 50b9e10ea54a9b57049fe311e4fe0a96277ef1c2 # release/1.3 +github.com/containerd/go-cni 49fbd9b210f3c8ee3b7fd3cd797aabaf364627c1 +github.com/containernetworking/cni v0.7.1 +github.com/containernetworking/plugins v0.7.6 +github.com/davecgh/go-spew v1.1.1 +github.com/docker/distribution 0d3efadf0154c2b8a4e7b6621fff9809655cc580 github.com/docker/docker 86f080cff0914e9694068ed78d503701667c4c00 github.com/docker/spdystream 449fdfce4d962303d702fec724ef0ad181c92528 -github.com/emicklei/go-restful ff4f55a206334ef123e4f79bbf348980da81ca46 -github.com/ghodss/yaml 73d445a93680fa1a78ae23a5839bad48f32ba1ee -github.com/golang/glog 44145f04b68cf362d9c4df2182967c2275eaefed -github.com/google/gofuzz 44d81051d367757e1c7c6a5a86423ece9afcf63c -github.com/hashicorp/errwrap 7554cd9344cec97297fa6649b055a8c98c2a1e55 -github.com/hashicorp/go-multierror ed905158d87462226a13fe39ddf685ea65f1c11f -github.com/json-iterator/go 1.0.4 -github.com/opencontainers/runtime-tools 6073aff4ac61897f75895123f7e24135204a404d -github.com/opencontainers/selinux 4a2974bf1ee960774ffd517717f1f45325af0206 -github.com/seccomp/libseccomp-golang 32f571b70023028bd57d9288c20efbcb237f3ce0 -github.com/spf13/pflag v1.0.0 -github.com/tchap/go-patricia 5ad6cdb7538b0097d5598c7e57f0a24072adf7dc -golang.org/x/crypto 49796115aa4b964c318aad4f3084fdb41e9aa067 -golang.org/x/time f51c12702a4d776e4c1fa9b0fabab841babae631 -gopkg.in/inf.v0 3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4 -gopkg.in/yaml.v2 53feefa2559fb8dfa8d81baad31be332c97d6c77 -k8s.io/api 7e796de92438aede7cb5d6bcf6c10f4fa65db560 -k8s.io/apimachinery fcb9a12f7875d01f8390b28faedc37dcf2e713b9 -k8s.io/apiserver 4a8377c547bbff4576a35b5b5bf4026d9b5aa763 -k8s.io/client-go b9a0cf870f239c4a4ecfd3feb075a50e7cbe1473 -k8s.io/kubernetes v1.10.0 -k8s.io/utils 258e2a2fa64568210fbd6267cf1d8fd87c3cb86e +github.com/emicklei/go-restful v2.9.5 +github.com/google/gofuzz v1.0.0 +github.com/json-iterator/go v1.1.8 +github.com/modern-go/reflect2 1.0.1 +github.com/modern-go/concurrent 1.0.3 +github.com/opencontainers/selinux 5215b1806f52b1fcc2070a8826c542c9d33cd3cf +github.com/seccomp/libseccomp-golang v0.9.1 +github.com/tchap/go-patricia v2.2.6 +golang.org/x/crypto 69ecbb4d6d5dab05e49161c6e77ea40a030884e1 +golang.org/x/oauth2 0f29369cfe4552d0e4bcddc57cc75f4d7e672a33 +golang.org/x/time 9d24e82272b4f38b78bc8cff74fa936d31ccd8ef +gopkg.in/inf.v0 v0.9.1 +gopkg.in/yaml.v2 53403b58ad1b561927d19068c655246f2db79d48 # v2.2.8 +k8s.io/api kubernetes-1.16.6 +k8s.io/apimachinery kubernetes-1.16.6 +k8s.io/apiserver kubernetes-1.16.6 +k8s.io/cri-api kubernetes-1.16.6 +k8s.io/client-go kubernetes-1.16.6 +k8s.io/klog v1.0.0 +k8s.io/kubernetes v1.16.6 +k8s.io/utils e782cd3c129fc98ee807f3c889c0f26eb7c9daf5 +sigs.k8s.io/yaml v1.1.0 # zfs dependencies -github.com/containerd/zfs 9a0b8b8b5982014b729cd34eb7cd7a11062aa6ec -github.com/mistifyio/go-zfs 166add352731e515512690329794ee593f1aaff2 -github.com/pborman/uuid c65b2f87fee37d1c7854c9164a450713c28d50cd +github.com/containerd/zfs 2ceb2dbb8154202ed1b8fd32e4ea25b491d7b251 +github.com/mistifyio/go-zfs f784269be439d704d3dfa1906f45dd848fed2beb +github.com/google/uuid v1.1.1 # aufs dependencies -github.com/containerd/aufs a7fbd554da7a9eafbe5a460a421313a9fd18d988 +github.com/containerd/aufs f894a800659b6e11c1a13084abd1712f346e349c diff --git 
a/src/cmd/linuxkit/vendor/github.com/containerd/continuity/LICENSE b/src/cmd/linuxkit/vendor/github.com/containerd/continuity/LICENSE index 8f71f43fe..584149b6e 100644 --- a/src/cmd/linuxkit/vendor/github.com/containerd/continuity/LICENSE +++ b/src/cmd/linuxkit/vendor/github.com/containerd/continuity/LICENSE @@ -1,6 +1,7 @@ + Apache License Version 2.0, January 2004 - http://www.apache.org/licenses/ + https://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION @@ -175,28 +176,16 @@ END OF TERMS AND CONDITIONS - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} + Copyright The containerd Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. - diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/continuity/README.md b/src/cmd/linuxkit/vendor/github.com/containerd/continuity/README.md index 0e91ce07b..f9f9ef0f9 100644 --- a/src/cmd/linuxkit/vendor/github.com/containerd/continuity/README.md +++ b/src/cmd/linuxkit/vendor/github.com/containerd/continuity/README.md @@ -72,3 +72,13 @@ If you change the proto file you will need to rebuild the generated Go with `go ```console $ go generate ./proto ``` + +## Project details + +continuity is a containerd sub-project, licensed under the [Apache 2.0 license](./LICENSE). +As a containerd sub-project, you will find the: + * [Project governance](https://github.com/containerd/project/blob/master/GOVERNANCE.md), + * [Maintainers](https://github.com/containerd/project/blob/master/MAINTAINERS), + * and [Contributing guidelines](https://github.com/containerd/project/blob/master/CONTRIBUTING.md) + +information in our [`containerd/project`](https://github.com/containerd/project) repository. diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/continuity/devices/devices.go b/src/cmd/linuxkit/vendor/github.com/containerd/continuity/devices/devices.go new file mode 100644 index 000000000..e4d4a0370 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/containerd/continuity/devices/devices.go @@ -0,0 +1,21 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package devices
+
+import "fmt"
+
+var ErrNotSupported = fmt.Errorf("not supported")
diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/continuity/devices/devices_unix.go b/src/cmd/linuxkit/vendor/github.com/containerd/continuity/devices/devices_unix.go
new file mode 100644
index 000000000..520a5a6f3
--- /dev/null
+++ b/src/cmd/linuxkit/vendor/github.com/containerd/continuity/devices/devices_unix.go
@@ -0,0 +1,74 @@
+// +build linux darwin freebsd solaris
+
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package devices
+
+import (
+	"fmt"
+	"os"
+	"syscall"
+
+	"golang.org/x/sys/unix"
+)
+
+func DeviceInfo(fi os.FileInfo) (uint64, uint64, error) {
+	sys, ok := fi.Sys().(*syscall.Stat_t)
+	if !ok {
+		return 0, 0, fmt.Errorf("cannot extract device from os.FileInfo")
+	}
+
+	dev := uint64(sys.Rdev)
+	return uint64(unix.Major(dev)), uint64(unix.Minor(dev)), nil
+}
+
+// Mknod provides a shortcut for the mknod syscall.
+func Mknod(p string, mode os.FileMode, maj, min int) error {
+	var (
+		m   = syscallMode(mode.Perm())
+		dev uint64
+	)
+
+	if mode&os.ModeDevice != 0 {
+		dev = unix.Mkdev(uint32(maj), uint32(min))
+
+		if mode&os.ModeCharDevice != 0 {
+			m |= unix.S_IFCHR
+		} else {
+			m |= unix.S_IFBLK
+		}
+	} else if mode&os.ModeNamedPipe != 0 {
+		m |= unix.S_IFIFO
+	}
+
+	return unix.Mknod(p, m, int(dev))
+}
+
+// syscallMode returns the syscall-specific mode bits from Go's portable mode bits.
+func syscallMode(i os.FileMode) (o uint32) {
+	o |= uint32(i.Perm())
+	if i&os.ModeSetuid != 0 {
+		o |= unix.S_ISUID
+	}
+	if i&os.ModeSetgid != 0 {
+		o |= unix.S_ISGID
+	}
+	if i&os.ModeSticky != 0 {
+		o |= unix.S_ISVTX
+	}
+	return
+}
diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/continuity/devices/devices_windows.go b/src/cmd/linuxkit/vendor/github.com/containerd/continuity/devices/devices_windows.go
new file mode 100644
index 000000000..04627c805
--- /dev/null
+++ b/src/cmd/linuxkit/vendor/github.com/containerd/continuity/devices/devices_windows.go
@@ -0,0 +1,27 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package devices
+
+import (
+	"os"
+
+	"github.com/pkg/errors"
+)
+
+func DeviceInfo(fi os.FileInfo) (uint64, uint64, error) {
+	return 0, 0, errors.Wrap(ErrNotSupported, "cannot get device info on windows")
+}
diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/continuity/driver/driver.go b/src/cmd/linuxkit/vendor/github.com/containerd/continuity/driver/driver.go
new file mode 100644
index 000000000..327e96af1
--- /dev/null
+++ b/src/cmd/linuxkit/vendor/github.com/containerd/continuity/driver/driver.go
@@ -0,0 +1,174 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package driver
+
+import (
+	"fmt"
+	"io"
+	"os"
+)
+
+var ErrNotSupported = fmt.Errorf("not supported")
+
+// Driver provides all of the system-level functions in a common interface.
+// The context should call these with full paths and should never use the `os`
+// package or any other package to access resources on the filesystem. This
+// mechanism lets us carefully control access to the context and maintain
+// path and resource integrity. It also gives us an interface to reason about
+// direct resource access.
+//
+// Implementations don't need to do much other than meet the interface. For
+// example, it is not required to wrap os.FileInfo to return correct paths for
+// the call to Name().
+type Driver interface {
+	// Note that Open() returns a File interface instead of *os.File. This
+	// is because os.File is a struct, so if Open were to return *os.File,
+	// the only way to fulfill the interface would be to call os.Open()
+	Open(path string) (File, error)
+	OpenFile(path string, flag int, perm os.FileMode) (File, error)
+
+	Stat(path string) (os.FileInfo, error)
+	Lstat(path string) (os.FileInfo, error)
+	Readlink(p string) (string, error)
+	Mkdir(path string, mode os.FileMode) error
+	Remove(path string) error
+
+	Link(oldname, newname string) error
+	Lchmod(path string, mode os.FileMode) error
+	Lchown(path string, uid, gid int64) error
+	Symlink(oldname, newname string) error
+
+	MkdirAll(path string, perm os.FileMode) error
+	RemoveAll(path string) error
+
+	// TODO(aaronl): These methods might move outside the main Driver
+	// interface in the future as more platforms are added.
+	Mknod(path string, mode os.FileMode, major int, minor int) error
+	Mkfifo(path string, mode os.FileMode) error
+}
+
+// File is the interface for interacting with files returned by continuity's Open.
+// This is needed since os.File is a struct, instead of an interface, so it can't
+// be used.
+type File interface {
+	io.ReadWriteCloser
+	io.Seeker
+	Readdir(n int) ([]os.FileInfo, error)
+}
+
+func NewSystemDriver() (Driver, error) {
+	// TODO(stevvooe): Consider having this take a "hint" path argument, which
+	// would be the context root. The hint could be used to resolve required
+	// filesystem support when assembling the driver to use.
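As a quick orientation, here is a hedged editorial sketch (not part of the vendored file) of how a caller might consume the Driver interface declared above. The paths are hypothetical and the program assumes the vendored import path used in this patch:

```go
package main

import (
	"fmt"
	"os"

	"github.com/containerd/continuity/driver"
)

func main() {
	d, err := driver.NewSystemDriver()
	if err != nil {
		panic(err)
	}

	// Every filesystem operation goes through the interface rather than
	// the os package directly, so callers can be redirected to an
	// instrumented or restricted implementation without changes.
	if err := d.MkdirAll("/tmp/continuity-demo", 0755); err != nil {
		panic(err)
	}
	f, err := d.OpenFile("/tmp/continuity-demo/hello.txt",
		os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
	if err != nil {
		panic(err)
	}
	if _, err := f.Write([]byte("hello\n")); err != nil {
		panic(err)
	}
	f.Close()

	fi, err := d.Stat("/tmp/continuity-demo/hello.txt")
	if err != nil {
		panic(err)
	}
	fmt.Println(fi.Name(), fi.Size())
}
```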
+	return &driver{}, nil
+}
+
+// XAttrDriver should be implemented on operating systems and filesystems that
+// have xattr support for regular files and directories.
+type XAttrDriver interface {
+	// Getxattr returns all of the extended attributes for the file at path.
+	// Typically, this takes a syscall to Listxattr and Getxattr.
+	Getxattr(path string) (map[string][]byte, error)
+
+	// Setxattr sets all of the extended attributes on file at path, following
+	// any symbolic links, if necessary. All attributes on the target are
+	// replaced by the values from attr. If the operation fails to set any
+	// attribute, those already applied will not be rolled back.
+	Setxattr(path string, attr map[string][]byte) error
+}
+
+// LXAttrDriver should be implemented by drivers on operating systems and
+// filesystems that support setting and getting extended attributes on
+// symbolic links. If this is not implemented, extended attributes will be
+// ignored on symbolic links.
+type LXAttrDriver interface {
+	// LGetxattr returns all of the extended attributes for the file at path
+	// and does not follow symlinks. Typically, this takes a syscall to
+	// Llistxattr and Lgetxattr.
+	LGetxattr(path string) (map[string][]byte, error)
+
+	// LSetxattr sets all of the extended attributes on file at path, without
+	// following symbolic links. All attributes on the target are replaced by
+	// the values from attr. If the operation fails to set any attribute,
+	// those already applied will not be rolled back.
+	LSetxattr(path string, attr map[string][]byte) error
+}
+
+type DeviceInfoDriver interface {
+	DeviceInfo(fi os.FileInfo) (maj uint64, min uint64, err error)
+}
+
+// driver is a simple default implementation that sends calls out to the "os"
+// package. Extend the "driver" type in system-specific files to add support,
+// such as xattrs, at compile time.
+type driver struct{}
+
+var _ File = &os.File{}
+
+// LocalDriver is the exported Driver struct for convenience.
+var LocalDriver Driver = &driver{}
+
+func (d *driver) Open(p string) (File, error) {
+	return os.Open(p)
+}
+
+func (d *driver) OpenFile(path string, flag int, perm os.FileMode) (File, error) {
+	return os.OpenFile(path, flag, perm)
+}
+
+func (d *driver) Stat(p string) (os.FileInfo, error) {
+	return os.Stat(p)
+}
+
+func (d *driver) Lstat(p string) (os.FileInfo, error) {
+	return os.Lstat(p)
+}
+
+func (d *driver) Mkdir(p string, mode os.FileMode) error {
+	return os.Mkdir(p, mode)
+}
+
+// Remove is used to unlink files and remove directories.
+// This is following the golang os package api which
+// combines the operations into a higher level Remove
+// function. If explicit unlinking or directory removal
+// to mirror system call is required, they should be
+// split up at that time.
+func (d *driver) Remove(path string) error {
+	return os.Remove(path)
+}
+
+func (d *driver) Link(oldname, newname string) error {
+	return os.Link(oldname, newname)
+}
+
+func (d *driver) Lchown(name string, uid, gid int64) error {
+	// TODO: error out if uid exceeds int bit width?
+	return os.Lchown(name, int(uid), int(gid))
+}
+
+func (d *driver) Symlink(oldname, newname string) error {
+	return os.Symlink(oldname, newname)
+}
+
+func (d *driver) MkdirAll(path string, perm os.FileMode) error {
+	return os.MkdirAll(path, perm)
+}
+
+func (d *driver) RemoveAll(path string) error {
+	return os.RemoveAll(path)
+}
diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/continuity/driver/driver_unix.go b/src/cmd/linuxkit/vendor/github.com/containerd/continuity/driver/driver_unix.go
new file mode 100644
index 000000000..6cb5d10fb
--- /dev/null
+++ b/src/cmd/linuxkit/vendor/github.com/containerd/continuity/driver/driver_unix.go
@@ -0,0 +1,138 @@
+// +build linux darwin freebsd solaris
+
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package driver
+
+import (
+	"errors"
+	"fmt"
+	"os"
+	"sort"
+
+	"github.com/containerd/continuity/devices"
+	"github.com/containerd/continuity/sysx"
+)
+
+func (d *driver) Mknod(path string, mode os.FileMode, major, minor int) error {
+	err := devices.Mknod(path, mode, major, minor)
+	if err != nil {
+		err = &os.PathError{Op: "mknod", Path: path, Err: err}
+	}
+	return err
+}
+
+func (d *driver) Mkfifo(path string, mode os.FileMode) error {
+	if mode&os.ModeNamedPipe == 0 {
+		return errors.New("mode passed to Mkfifo does not have the named pipe bit set")
+	}
+	// mknod with a mode that has ModeNamedPipe set creates a fifo, not a
+	// device.
+	err := devices.Mknod(path, mode, 0, 0)
+	if err != nil {
+		err = &os.PathError{Op: "mkfifo", Path: path, Err: err}
+	}
+	return err
+}
+
+// Getxattr returns all of the extended attributes for the file at path p.
+func (d *driver) Getxattr(p string) (map[string][]byte, error) {
+	xattrs, err := sysx.Listxattr(p)
+	if err != nil {
+		return nil, fmt.Errorf("listing %s xattrs: %v", p, err)
+	}
+
+	sort.Strings(xattrs)
+	m := make(map[string][]byte, len(xattrs))
+
+	for _, attr := range xattrs {
+		value, err := sysx.Getxattr(p, attr)
+		if err != nil {
+			return nil, fmt.Errorf("getting %q xattr on %s: %v", attr, p, err)
+		}
+
+		// NOTE(stevvooe): This append/copy trick relies on unique
+		// xattrs. Break this out into an alloc/copy if xattrs are no
+		// longer unique.
+		m[attr] = append(m[attr], value...)
+	}
+
+	return m, nil
+}
+
+// Setxattr sets all of the extended attributes on file at path, following
+// any symbolic links, if necessary. All attributes on the target are
+// replaced by the values from attr. If the operation fails to set any
+// attribute, those already applied will not be rolled back.
+func (d *driver) Setxattr(path string, attrMap map[string][]byte) error {
+	for attr, value := range attrMap {
+		if err := sysx.Setxattr(path, attr, value, 0); err != nil {
+			return fmt.Errorf("error setting xattr %q on %s: %v", attr, path, err)
+		}
+	}
+
+	return nil
+}
+
+// LGetxattr returns all of the extended attributes for the file at path p,
+// not following symbolic links.
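Since the xattr methods sit behind the optional XAttrDriver interface rather than the base Driver, a caller has to type-assert for them. The following is a hedged editorial sketch under that assumption; the file path is hypothetical and the program assumes a Linux host with xattr support:

```go
package main

import (
	"fmt"

	"github.com/containerd/continuity/driver"
)

func main() {
	d, err := driver.NewSystemDriver()
	if err != nil {
		panic(err)
	}

	// Create a file to decorate, using the package's own WriteFile helper.
	path := "/tmp/continuity-xattr-demo.txt"
	if err := driver.WriteFile(d, path, []byte("hello"), 0644); err != nil {
		panic(err)
	}

	// The xattr support is platform-specific, so it lives behind an
	// optional interface.
	xd, ok := d.(driver.XAttrDriver)
	if !ok {
		panic("xattrs not supported by this driver")
	}

	// Setxattr applies every entry in the map; Getxattr returns the
	// complete set currently present on the file.
	if err := xd.Setxattr(path, map[string][]byte{
		"user.example": []byte("value"),
	}); err != nil {
		panic(err)
	}
	attrs, err := xd.Getxattr(path)
	if err != nil {
		panic(err)
	}
	fmt.Printf("user.example=%s\n", attrs["user.example"])
}
```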
+func (d *driver) LGetxattr(p string) (map[string][]byte, error) {
+	xattrs, err := sysx.LListxattr(p)
+	if err != nil {
+		return nil, fmt.Errorf("listing %s xattrs: %v", p, err)
+	}
+
+	sort.Strings(xattrs)
+	m := make(map[string][]byte, len(xattrs))
+
+	for _, attr := range xattrs {
+		value, err := sysx.LGetxattr(p, attr)
+		if err != nil {
+			return nil, fmt.Errorf("getting %q xattr on %s: %v", attr, p, err)
+		}
+
+		// NOTE(stevvooe): This append/copy trick relies on unique
+		// xattrs. Break this out into an alloc/copy if xattrs are no
+		// longer unique.
+		m[attr] = append(m[attr], value...)
+	}
+
+	return m, nil
+}
+
+// LSetxattr sets all of the extended attributes on file at path, not
+// following any symbolic links. All attributes on the target are
+// replaced by the values from attr. If the operation fails to set any
+// attribute, those already applied will not be rolled back.
+func (d *driver) LSetxattr(path string, attrMap map[string][]byte) error {
+	for attr, value := range attrMap {
+		if err := sysx.LSetxattr(path, attr, value, 0); err != nil {
+			return fmt.Errorf("error setting xattr %q on %s: %v", attr, path, err)
+		}
+	}
+
+	return nil
+}
+
+func (d *driver) DeviceInfo(fi os.FileInfo) (maj uint64, min uint64, err error) {
+	return devices.DeviceInfo(fi)
+}
+
+// Readlink was forked on Windows to fix a Golang bug; use the "os" package here.
+func (d *driver) Readlink(p string) (string, error) {
+	return os.Readlink(p)
+}
diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/continuity/driver/driver_windows.go b/src/cmd/linuxkit/vendor/github.com/containerd/continuity/driver/driver_windows.go
new file mode 100644
index 000000000..f1dcea32a
--- /dev/null
+++ b/src/cmd/linuxkit/vendor/github.com/containerd/continuity/driver/driver_windows.go
@@ -0,0 +1,43 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package driver
+
+import (
+	"os"
+
+	"github.com/containerd/continuity/sysx"
+)
+
+func (d *driver) Mknod(path string, mode os.FileMode, major, minor int) error {
+	return &os.PathError{Op: "mknod", Path: path, Err: ErrNotSupported}
+}
+
+func (d *driver) Mkfifo(path string, mode os.FileMode) error {
+	return &os.PathError{Op: "mkfifo", Path: path, Err: ErrNotSupported}
+}
+
+// Lchmod changes the mode of a file, not following symlinks.
+func (d *driver) Lchmod(path string, mode os.FileMode) (err error) {
+	// TODO: Use Windows' equivalent
+	return os.Chmod(path, mode)
+}
+
+// Readlink is forked in order to support Volume paths which are used
+// in container layers.
+func (d *driver) Readlink(p string) (string, error) {
+	return sysx.Readlink(p)
+}
diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/continuity/driver/lchmod_linux.go b/src/cmd/linuxkit/vendor/github.com/containerd/continuity/driver/lchmod_linux.go
new file mode 100644
index 000000000..06be28527
--- /dev/null
+++ b/src/cmd/linuxkit/vendor/github.com/containerd/continuity/driver/lchmod_linux.go
@@ -0,0 +1,39 @@
+/*
+   Copyright The containerd Authors.
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package driver + +import ( + "os" + + "golang.org/x/sys/unix" +) + +// Lchmod changes the mode of a file not following symlinks. +func (d *driver) Lchmod(path string, mode os.FileMode) error { + // On Linux, file mode is not supported for symlinks, + // and fchmodat() does not support AT_SYMLINK_NOFOLLOW, + // so symlinks need to be skipped entirely. + if st, err := os.Stat(path); err == nil && st.Mode()&os.ModeSymlink != 0 { + return nil + } + + err := unix.Fchmodat(unix.AT_FDCWD, path, uint32(mode), 0) + if err != nil { + err = &os.PathError{Op: "lchmod", Path: path, Err: err} + } + return err +} diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/continuity/driver/lchmod_unix.go b/src/cmd/linuxkit/vendor/github.com/containerd/continuity/driver/lchmod_unix.go new file mode 100644 index 000000000..b8877a8ae --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/containerd/continuity/driver/lchmod_unix.go @@ -0,0 +1,34 @@ +// +build darwin freebsd solaris + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package driver + +import ( + "os" + + "golang.org/x/sys/unix" +) + +// Lchmod changes the mode of a file not following symlinks. +func (d *driver) Lchmod(path string, mode os.FileMode) error { + err := unix.Fchmodat(unix.AT_FDCWD, path, uint32(mode), unix.AT_SYMLINK_NOFOLLOW) + if err != nil { + err = &os.PathError{Op: "lchmod", Path: path, Err: err} + } + return err +} diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/continuity/driver/utils.go b/src/cmd/linuxkit/vendor/github.com/containerd/continuity/driver/utils.go new file mode 100644 index 000000000..0c688d158 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/containerd/continuity/driver/utils.go @@ -0,0 +1,90 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/
+
+package driver
+
+import (
+	"io"
+	"io/ioutil"
+	"os"
+	"sort"
+)
+
+// ReadFile works the same as ioutil.ReadFile with the Driver abstraction
+func ReadFile(r Driver, filename string) ([]byte, error) {
+	f, err := r.Open(filename)
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+
+	data, err := ioutil.ReadAll(f)
+	if err != nil {
+		return nil, err
+	}
+
+	return data, nil
+}
+
+// WriteFile works the same as ioutil.WriteFile with the Driver abstraction
+func WriteFile(r Driver, filename string, data []byte, perm os.FileMode) error {
+	f, err := r.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+
+	n, err := f.Write(data)
+	if err != nil {
+		return err
+	} else if n != len(data) {
+		return io.ErrShortWrite
+	}
+
+	return nil
+}
+
+// ReadDir works the same as ioutil.ReadDir with the Driver abstraction
+func ReadDir(r Driver, dirname string) ([]os.FileInfo, error) {
+	f, err := r.Open(dirname)
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+
+	dirs, err := f.Readdir(-1)
+	if err != nil {
+		return nil, err
+	}
+
+	sort.Sort(fileInfos(dirs))
+	return dirs, nil
+}
+
+// Simple implementation of the sort.Interface for os.FileInfo
+type fileInfos []os.FileInfo
+
+func (fis fileInfos) Len() int {
+	return len(fis)
+}
+
+func (fis fileInfos) Less(i, j int) bool {
+	return fis[i].Name() < fis[j].Name()
+}
+
+func (fis fileInfos) Swap(i, j int) {
+	fis[i], fis[j] = fis[j], fis[i]
+}
diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/continuity/fs/copy.go b/src/cmd/linuxkit/vendor/github.com/containerd/continuity/fs/copy.go
new file mode 100644
index 000000000..818bba2cd
--- /dev/null
+++ b/src/cmd/linuxkit/vendor/github.com/containerd/continuity/fs/copy.go
@@ -0,0 +1,176 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package fs
+
+import (
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"sync"
+
+	"github.com/pkg/errors"
+)
+
+var bufferPool = &sync.Pool{
+	New: func() interface{} {
+		buffer := make([]byte, 32*1024)
+		return &buffer
+	},
+}
+
+// XAttrErrorHandler transforms a non-nil xattr error.
+// Return nil to ignore an error.
+// xattrKey can be empty for a listxattr operation.
+type XAttrErrorHandler func(dst, src, xattrKey string, err error) error
+
+type copyDirOpts struct {
+	xeh XAttrErrorHandler
+}
+
+type CopyDirOpt func(*copyDirOpts) error
+
+// WithXAttrErrorHandler allows specifying an XAttrErrorHandler.
+// If a nil XAttrErrorHandler is specified (the default), CopyDir stops
+// on a non-nil xattr error.
+func WithXAttrErrorHandler(xeh XAttrErrorHandler) CopyDirOpt {
+	return func(o *copyDirOpts) error {
+		o.xeh = xeh
+		return nil
+	}
+}
+
+// WithAllowXAttrErrors allows ignoring xattr errors.
+func WithAllowXAttrErrors() CopyDirOpt {
+	xeh := func(dst, src, xattrKey string, err error) error {
+		return nil
+	}
+	return WithXAttrErrorHandler(xeh)
+}
+
+// CopyDir copies the directory from src to dst.
+// The most efficient means of copying is attempted.
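CopyDir uses the functional-options pattern declared just above. As a hedged editorial sketch (hypothetical paths, vendored import path assumed), a caller would use it roughly like this:

```go
package main

import (
	"log"

	"github.com/containerd/continuity/fs"
)

func main() {
	// Default behavior: the copy aborts on the first xattr error.
	if err := fs.CopyDir("/tmp/dst", "/tmp/src"); err != nil {
		log.Fatal(err)
	}

	// With the option below, xattr failures are ignored, which can be
	// useful when the destination filesystem lacks xattr support.
	if err := fs.CopyDir("/tmp/dst", "/tmp/src", fs.WithAllowXAttrErrors()); err != nil {
		log.Fatal(err)
	}
}
```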
+func CopyDir(dst, src string, opts ...CopyDirOpt) error {
+	var o copyDirOpts
+	for _, opt := range opts {
+		if err := opt(&o); err != nil {
+			return err
+		}
+	}
+	inodes := map[uint64]string{}
+	return copyDirectory(dst, src, inodes, &o)
+}
+
+func copyDirectory(dst, src string, inodes map[uint64]string, o *copyDirOpts) error {
+	stat, err := os.Stat(src)
+	if err != nil {
+		return errors.Wrapf(err, "failed to stat %s", src)
+	}
+	if !stat.IsDir() {
+		return errors.Errorf("source %s is not directory", src)
+	}
+
+	if st, err := os.Stat(dst); err != nil {
+		if err := os.Mkdir(dst, stat.Mode()); err != nil {
+			return errors.Wrapf(err, "failed to mkdir %s", dst)
+		}
+	} else if !st.IsDir() {
+		return errors.Errorf("cannot copy to non-directory: %s", dst)
+	} else {
+		if err := os.Chmod(dst, stat.Mode()); err != nil {
+			return errors.Wrapf(err, "failed to chmod on %s", dst)
+		}
+	}
+
+	fis, err := ioutil.ReadDir(src)
+	if err != nil {
+		return errors.Wrapf(err, "failed to read %s", src)
+	}
+
+	if err := copyFileInfo(stat, dst); err != nil {
+		return errors.Wrapf(err, "failed to copy file info for %s", dst)
+	}
+
+	if err := copyXAttrs(dst, src, o.xeh); err != nil {
+		return errors.Wrap(err, "failed to copy xattrs")
+	}
+
+	for _, fi := range fis {
+		source := filepath.Join(src, fi.Name())
+		target := filepath.Join(dst, fi.Name())
+
+		switch {
+		case fi.IsDir():
+			if err := copyDirectory(target, source, inodes, o); err != nil {
+				return err
+			}
+			continue
+		case (fi.Mode() & os.ModeType) == 0:
+			link, err := getLinkSource(target, fi, inodes)
+			if err != nil {
+				return errors.Wrap(err, "failed to get hardlink")
+			}
+			if link != "" {
+				if err := os.Link(link, target); err != nil {
+					return errors.Wrap(err, "failed to create hard link")
+				}
+			} else if err := CopyFile(target, source); err != nil {
+				return errors.Wrap(err, "failed to copy files")
+			}
+		case (fi.Mode() & os.ModeSymlink) == os.ModeSymlink:
+			link, err := os.Readlink(source)
+			if err != nil {
+				return errors.Wrapf(err, "failed to read link: %s", source)
+			}
+			if err := os.Symlink(link, target); err != nil {
+				return errors.Wrapf(err, "failed to create symlink: %s", target)
+			}
+		case (fi.Mode() & os.ModeDevice) == os.ModeDevice:
+			if err := copyDevice(target, fi); err != nil {
+				return errors.Wrapf(err, "failed to create device")
+			}
+		default:
+			// TODO: Support pipes and sockets
+			// err is nil at this point, so wrapping it would return nil and
+			// silently skip the entry; report the unsupported mode directly.
+			return errors.Errorf("unsupported mode %s", fi.Mode())
+		}
+		if err := copyFileInfo(fi, target); err != nil {
+			return errors.Wrap(err, "failed to copy file info")
+		}
+
+		if err := copyXAttrs(target, source, o.xeh); err != nil {
+			return errors.Wrap(err, "failed to copy xattrs")
+		}
+	}
+
+	return nil
+}
+
+// CopyFile copies the source file to the target.
+// The most efficient means of copying is used for the platform.
+func CopyFile(target, source string) error {
+	src, err := os.Open(source)
+	if err != nil {
+		return errors.Wrapf(err, "failed to open source %s", source)
+	}
+	defer src.Close()
+	tgt, err := os.Create(target)
+	if err != nil {
+		return errors.Wrapf(err, "failed to open target %s", target)
+	}
+	defer tgt.Close()
+
+	return copyFileContent(tgt, src)
+}
diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/continuity/fs/copy_linux.go b/src/cmd/linuxkit/vendor/github.com/containerd/continuity/fs/copy_linux.go
new file mode 100644
index 000000000..72bae7d4e
--- /dev/null
+++ b/src/cmd/linuxkit/vendor/github.com/containerd/continuity/fs/copy_linux.go
@@ -0,0 +1,147 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package fs
+
+import (
+	"io"
+	"os"
+	"syscall"
+
+	"github.com/containerd/continuity/sysx"
+	"github.com/pkg/errors"
+	"golang.org/x/sys/unix"
+)
+
+func copyFileInfo(fi os.FileInfo, name string) error {
+	st := fi.Sys().(*syscall.Stat_t)
+	if err := os.Lchown(name, int(st.Uid), int(st.Gid)); err != nil {
+		if os.IsPermission(err) {
+			// Normally if uid/gid are the same this would be a no-op, but some
+			// filesystems may still return EPERM... for instance NFS does this.
+			// In such a case, this is not an error.
+			if dstStat, err2 := os.Lstat(name); err2 == nil {
+				st2 := dstStat.Sys().(*syscall.Stat_t)
+				if st.Uid == st2.Uid && st.Gid == st2.Gid {
+					err = nil
+				}
+			}
+		}
+		if err != nil {
+			return errors.Wrapf(err, "failed to chown %s", name)
+		}
+	}
+
+	if (fi.Mode() & os.ModeSymlink) != os.ModeSymlink {
+		if err := os.Chmod(name, fi.Mode()); err != nil {
+			return errors.Wrapf(err, "failed to chmod %s", name)
+		}
+	}
+
+	timespec := []unix.Timespec{
+		unix.NsecToTimespec(syscall.TimespecToNsec(StatAtime(st))),
+		unix.NsecToTimespec(syscall.TimespecToNsec(StatMtime(st))),
+	}
+	if err := unix.UtimesNanoAt(unix.AT_FDCWD, name, timespec, unix.AT_SYMLINK_NOFOLLOW); err != nil {
+		return errors.Wrapf(err, "failed to utime %s", name)
+	}
+
+	return nil
+}
+
+const maxSSizeT = int64(^uint(0) >> 1)
+
+func copyFileContent(dst, src *os.File) error {
+	st, err := src.Stat()
+	if err != nil {
+		return errors.Wrap(err, "unable to stat source")
+	}
+
+	size := st.Size()
+	first := true
+	srcFd := int(src.Fd())
+	dstFd := int(dst.Fd())
+
+	for size > 0 {
+		// Ensure that we are never trying to copy more than SSIZE_MAX at a
+		// time and at the same time avoid overflows when the file is larger
+		// than 4GB on 32-bit systems.
+ var copySize int + if size > maxSSizeT { + copySize = int(maxSSizeT) + } else { + copySize = int(size) + } + n, err := unix.CopyFileRange(srcFd, nil, dstFd, nil, copySize, 0) + if err != nil { + if (err != unix.ENOSYS && err != unix.EXDEV) || !first { + return errors.Wrap(err, "copy file range failed") + } + + buf := bufferPool.Get().(*[]byte) + _, err = io.CopyBuffer(dst, src, *buf) + bufferPool.Put(buf) + return errors.Wrap(err, "userspace copy failed") + } + + first = false + size -= int64(n) + } + + return nil +} + +func copyXAttrs(dst, src string, xeh XAttrErrorHandler) error { + xattrKeys, err := sysx.LListxattr(src) + if err != nil { + e := errors.Wrapf(err, "failed to list xattrs on %s", src) + if xeh != nil { + e = xeh(dst, src, "", e) + } + return e + } + for _, xattr := range xattrKeys { + data, err := sysx.LGetxattr(src, xattr) + if err != nil { + e := errors.Wrapf(err, "failed to get xattr %q on %s", xattr, src) + if xeh != nil { + if e = xeh(dst, src, xattr, e); e == nil { + continue + } + } + return e + } + if err := sysx.LSetxattr(dst, xattr, data, 0); err != nil { + e := errors.Wrapf(err, "failed to set xattr %q on %s", xattr, dst) + if xeh != nil { + if e = xeh(dst, src, xattr, e); e == nil { + continue + } + } + return e + } + } + + return nil +} + +func copyDevice(dst string, fi os.FileInfo) error { + st, ok := fi.Sys().(*syscall.Stat_t) + if !ok { + return errors.New("unsupported stat type") + } + return unix.Mknod(dst, uint32(fi.Mode()), int(st.Rdev)) +} diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/continuity/fs/copy_unix.go b/src/cmd/linuxkit/vendor/github.com/containerd/continuity/fs/copy_unix.go new file mode 100644 index 000000000..a5de89261 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/containerd/continuity/fs/copy_unix.go @@ -0,0 +1,112 @@ +// +build darwin freebsd openbsd solaris + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package fs + +import ( + "io" + "os" + "syscall" + + "github.com/containerd/continuity/sysx" + "github.com/pkg/errors" + "golang.org/x/sys/unix" +) + +func copyFileInfo(fi os.FileInfo, name string) error { + st := fi.Sys().(*syscall.Stat_t) + if err := os.Lchown(name, int(st.Uid), int(st.Gid)); err != nil { + if os.IsPermission(err) { + // Normally if uid/gid are the same this would be a no-op, but some + // filesystems may still return EPERM... for instance NFS does this. + // In such a case, this is not an error. 
+ if dstStat, err2 := os.Lstat(name); err2 == nil { + st2 := dstStat.Sys().(*syscall.Stat_t) + if st.Uid == st2.Uid && st.Gid == st2.Gid { + err = nil + } + } + } + if err != nil { + return errors.Wrapf(err, "failed to chown %s", name) + } + } + + if (fi.Mode() & os.ModeSymlink) != os.ModeSymlink { + if err := os.Chmod(name, fi.Mode()); err != nil { + return errors.Wrapf(err, "failed to chmod %s", name) + } + } + + timespec := []syscall.Timespec{StatAtime(st), StatMtime(st)} + if err := syscall.UtimesNano(name, timespec); err != nil { + return errors.Wrapf(err, "failed to utime %s", name) + } + + return nil +} + +func copyFileContent(dst, src *os.File) error { + buf := bufferPool.Get().(*[]byte) + _, err := io.CopyBuffer(dst, src, *buf) + bufferPool.Put(buf) + + return err +} + +func copyXAttrs(dst, src string, xeh XAttrErrorHandler) error { + xattrKeys, err := sysx.LListxattr(src) + if err != nil { + e := errors.Wrapf(err, "failed to list xattrs on %s", src) + if xeh != nil { + e = xeh(dst, src, "", e) + } + return e + } + for _, xattr := range xattrKeys { + data, err := sysx.LGetxattr(src, xattr) + if err != nil { + e := errors.Wrapf(err, "failed to get xattr %q on %s", xattr, src) + if xeh != nil { + if e = xeh(dst, src, xattr, e); e == nil { + continue + } + } + return e + } + if err := sysx.LSetxattr(dst, xattr, data, 0); err != nil { + e := errors.Wrapf(err, "failed to set xattr %q on %s", xattr, dst) + if xeh != nil { + if e = xeh(dst, src, xattr, e); e == nil { + continue + } + } + return e + } + } + + return nil +} + +func copyDevice(dst string, fi os.FileInfo) error { + st, ok := fi.Sys().(*syscall.Stat_t) + if !ok { + return errors.New("unsupported stat type") + } + return unix.Mknod(dst, uint32(fi.Mode()), int(st.Rdev)) +} diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/continuity/fs/copy_windows.go b/src/cmd/linuxkit/vendor/github.com/containerd/continuity/fs/copy_windows.go new file mode 100644 index 000000000..27c7d7dbb --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/containerd/continuity/fs/copy_windows.go @@ -0,0 +1,49 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/
+
+package fs
+
+import (
+	"io"
+	"os"
+
+	"github.com/pkg/errors"
+)
+
+func copyFileInfo(fi os.FileInfo, name string) error {
+	if err := os.Chmod(name, fi.Mode()); err != nil {
+		return errors.Wrapf(err, "failed to chmod %s", name)
+	}
+
+	// TODO: copy windows specific metadata
+
+	return nil
+}
+
+func copyFileContent(dst, src *os.File) error {
+	buf := bufferPool.Get().(*[]byte)
+	_, err := io.CopyBuffer(dst, src, *buf)
+	bufferPool.Put(buf)
+	return err
+}
+
+func copyXAttrs(dst, src string, xeh XAttrErrorHandler) error {
+	return nil
+}
+
+func copyDevice(dst string, fi os.FileInfo) error {
+	return errors.New("device copy not supported")
+}
diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/continuity/fs/diff.go b/src/cmd/linuxkit/vendor/github.com/containerd/continuity/fs/diff.go
new file mode 100644
index 000000000..e64f9e73d
--- /dev/null
+++ b/src/cmd/linuxkit/vendor/github.com/containerd/continuity/fs/diff.go
@@ -0,0 +1,326 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package fs
+
+import (
+	"context"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"golang.org/x/sync/errgroup"
+
+	"github.com/sirupsen/logrus"
+)
+
+// ChangeKind is the type of modification that
+// a change is making.
+type ChangeKind int
+
+const (
+	// ChangeKindUnmodified represents an unmodified
+	// file
+	ChangeKindUnmodified = iota
+
+	// ChangeKindAdd represents an addition of
+	// a file
+	ChangeKindAdd
+
+	// ChangeKindModify represents a change to
+	// an existing file
+	ChangeKindModify
+
+	// ChangeKindDelete represents a delete of
+	// a file
+	ChangeKindDelete
+)
+
+func (k ChangeKind) String() string {
+	switch k {
+	case ChangeKindUnmodified:
+		return "unmodified"
+	case ChangeKindAdd:
+		return "add"
+	case ChangeKindModify:
+		return "modify"
+	case ChangeKindDelete:
+		return "delete"
+	default:
+		return ""
+	}
+}
+
+// Change represents a single change between a diff and its parent.
+type Change struct {
+	Kind ChangeKind
+	Path string
+}
+
+// ChangeFunc is the type of function called for each change
+// computed during a directory changes calculation.
+type ChangeFunc func(ChangeKind, string, os.FileInfo, error) error
+
+// Changes computes changes between two directories calling the
+// given change function for each computed change. The first
+// directory is intended to be the base directory and the second
+// directory the changed directory.
+//
+// The change callback is called in the order of path names and
+// should be applicable in that order.
+// Due to this apply ordering, the following is true
+// - Removed directory trees only create a single change for the root
+//   directory removed. Remaining changes are implied.
+// - A directory which is modified to become a file will not have
+//   delete entries for sub-path items, their removal is implied
+//   by the removal of the parent directory.
+//
+// Opaque directories will not be treated specially and each file
+// removed from the base directory will show up as a removal.
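As a hedged editorial sketch of the callback contract described above (hypothetical snapshot directories, vendored import path assumed), the computed changes can be collected like this:

```go
package main

import (
	"context"
	"fmt"
	"os"

	"github.com/containerd/continuity/fs"
)

func main() {
	var changes []fs.Change
	// The callback fires once per change, in path order, so appending
	// preserves an order in which the changes could be applied.
	err := fs.Changes(context.Background(), "/tmp/base", "/tmp/modified",
		func(k fs.ChangeKind, p string, f os.FileInfo, err error) error {
			if err != nil {
				return err
			}
			changes = append(changes, fs.Change{Kind: k, Path: p})
			return nil
		})
	if err != nil {
		panic(err)
	}
	for _, c := range changes {
		fmt.Printf("%s %s\n", c.Kind, c.Path)
	}
}
```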
+//
+// File content comparisons will be done on files whose timestamps
+// may have been truncated. If either of the files being compared
+// has a nanosecond value of zero, each byte will be compared for
+// differences. If two files have the same seconds value but different
+// nanosecond values where one of those values is zero, the files will
+// be considered unchanged if the content is the same. This behavior
+// is to account for timestamp truncation during archiving.
+func Changes(ctx context.Context, a, b string, changeFn ChangeFunc) error {
+	if a == "" {
+		logrus.Debugf("Using single walk diff for %s", b)
+		return addDirChanges(ctx, changeFn, b)
+	} else if diffOptions := detectDirDiff(b, a); diffOptions != nil {
+		logrus.Debugf("Using single walk diff for %s from %s", diffOptions.diffDir, a)
+		return diffDirChanges(ctx, changeFn, a, diffOptions)
+	}
+
+	logrus.Debugf("Using double walk diff for %s from %s", b, a)
+	return doubleWalkDiff(ctx, changeFn, a, b)
+}
+
+func addDirChanges(ctx context.Context, changeFn ChangeFunc, root string) error {
+	return filepath.Walk(root, func(path string, f os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+
+		// Rebase path
+		path, err = filepath.Rel(root, path)
+		if err != nil {
+			return err
+		}
+
+		path = filepath.Join(string(os.PathSeparator), path)
+
+		// Skip root
+		if path == string(os.PathSeparator) {
+			return nil
+		}
+
+		return changeFn(ChangeKindAdd, path, f, nil)
+	})
+}
+
+// diffDirOptions is used when the diff can be directly calculated from
+// a diff directory to its base, without walking both trees.
+type diffDirOptions struct {
+	diffDir      string
+	skipChange   func(string) (bool, error)
+	deleteChange func(string, string, os.FileInfo) (string, error)
+}
+
+// diffDirChanges walks the diff directory and compares changes against the base.
+func diffDirChanges(ctx context.Context, changeFn ChangeFunc, base string, o *diffDirOptions) error {
+	changedDirs := make(map[string]struct{})
+	return filepath.Walk(o.diffDir, func(path string, f os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+
+		// Rebase path
+		path, err = filepath.Rel(o.diffDir, path)
+		if err != nil {
+			return err
+		}
+
+		path = filepath.Join(string(os.PathSeparator), path)
+
+		// Skip root
+		if path == string(os.PathSeparator) {
+			return nil
+		}
+
+		// TODO: handle opaqueness, start new double walker at this
+		// location to get deletes, and skip tree in single walker
+
+		if o.skipChange != nil {
+			if skip, err := o.skipChange(path); skip {
+				return err
+			}
+		}
+
+		var kind ChangeKind
+
+		deletedFile, err := o.deleteChange(o.diffDir, path, f)
+		if err != nil {
+			return err
+		}
+
+		// Find out what kind of modification happened
+		if deletedFile != "" {
+			path = deletedFile
+			kind = ChangeKindDelete
+			f = nil
+		} else {
+			// Otherwise, the file was added
+			kind = ChangeKindAdd
+
+			// ...Unless it already existed in the base, in which case it's a modification
+			stat, err := os.Stat(filepath.Join(base, path))
+			if err != nil && !os.IsNotExist(err) {
+				return err
+			}
+			if err == nil {
+				// The file existed in the base, so that's a modification
+
+				// However, if it's a directory, maybe it wasn't actually modified.
+				// If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar
+				if stat.IsDir() && f.IsDir() {
+					if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) {
+						// Both directories are the same, don't record the change
+						return nil
+					}
+				}
+				kind = ChangeKindModify
+			}
+		}
+
+		// If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files.
+		// This block is here to ensure the change is recorded even if the
+		// modify time, mode and size of the parent directory in the rw and ro layers are all equal.
+		// Check https://github.com/docker/docker/pull/13590 for details.
+		if f.IsDir() {
+			changedDirs[path] = struct{}{}
+		}
+		if kind == ChangeKindAdd || kind == ChangeKindDelete {
+			parent := filepath.Dir(path)
+			if _, ok := changedDirs[parent]; !ok && parent != "/" {
+				pi, err := os.Stat(filepath.Join(o.diffDir, parent))
+				if err := changeFn(ChangeKindModify, parent, pi, err); err != nil {
+					return err
+				}
+				changedDirs[parent] = struct{}{}
+			}
+		}
+
+		return changeFn(kind, path, f, nil)
+	})
+}
+
+// doubleWalkDiff walks both directories to create a diff
+func doubleWalkDiff(ctx context.Context, changeFn ChangeFunc, a, b string) (err error) {
+	g, ctx := errgroup.WithContext(ctx)
+
+	var (
+		c1 = make(chan *currentPath)
+		c2 = make(chan *currentPath)
+
+		f1, f2 *currentPath
+		rmdir  string
+	)
+	g.Go(func() error {
+		defer close(c1)
+		return pathWalk(ctx, a, c1)
+	})
+	g.Go(func() error {
+		defer close(c2)
+		return pathWalk(ctx, b, c2)
+	})
+	g.Go(func() error {
+		for c1 != nil || c2 != nil {
+			if f1 == nil && c1 != nil {
+				f1, err = nextPath(ctx, c1)
+				if err != nil {
+					return err
+				}
+				if f1 == nil {
+					c1 = nil
+				}
+			}
+
+			if f2 == nil && c2 != nil {
+				f2, err = nextPath(ctx, c2)
+				if err != nil {
+					return err
+				}
+				if f2 == nil {
+					c2 = nil
+				}
+			}
+			if f1 == nil && f2 == nil {
+				continue
+			}
+
+			var f os.FileInfo
+			k, p := pathChange(f1, f2)
+			switch k {
+			case ChangeKindAdd:
+				if rmdir != "" {
+					rmdir = ""
+				}
+				f = f2.f
+				f2 = nil
+			case ChangeKindDelete:
+				// Check if this file is already removed by being
+				// under a removed directory
+				if rmdir != "" && strings.HasPrefix(f1.path, rmdir) {
+					f1 = nil
+					continue
+				} else if f1.f.IsDir() {
+					rmdir = f1.path + string(os.PathSeparator)
+				} else if rmdir != "" {
+					rmdir = ""
+				}
+				f1 = nil
+			case ChangeKindModify:
+				same, err := sameFile(f1, f2)
+				if err != nil {
+					return err
+				}
+				if f1.f.IsDir() && !f2.f.IsDir() {
+					rmdir = f1.path + string(os.PathSeparator)
+				} else if rmdir != "" {
+					rmdir = ""
+				}
+				f = f2.f
+				f1 = nil
+				f2 = nil
+				if same {
+					if !isLinked(f) {
+						continue
+					}
+					k = ChangeKindUnmodified
+				}
+			}
+			if err := changeFn(k, p, f, nil); err != nil {
+				return err
+			}
+		}
+		return nil
+	})
+
+	return g.Wait()
}
diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/continuity/fs/diff_unix.go b/src/cmd/linuxkit/vendor/github.com/containerd/continuity/fs/diff_unix.go
new file mode 100644
index 000000000..7913af27d
--- /dev/null
+++ b/src/cmd/linuxkit/vendor/github.com/containerd/continuity/fs/diff_unix.go
@@ -0,0 +1,74 @@
+// +build !windows
+
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package fs
+
+import (
+	"bytes"
+	"os"
+	"syscall"
+
+	"github.com/containerd/continuity/sysx"
+	"github.com/pkg/errors"
+)
+
+// detectDirDiff returns diff dir options if a directory could
+// be found in the mount info for upper which is the direct
+// diff with the provided lower directory
+func detectDirDiff(upper, lower string) *diffDirOptions {
+	// TODO: get mount options for upper
+	// TODO: detect AUFS
+	// TODO: detect overlay
+	return nil
+}
+
+// compareSysStat returns whether the stats are equivalent (i.e.
+// whether the files are considered the same file) and an error
+func compareSysStat(s1, s2 interface{}) (bool, error) {
+	ls1, ok := s1.(*syscall.Stat_t)
+	if !ok {
+		return false, nil
+	}
+	ls2, ok := s2.(*syscall.Stat_t)
+	if !ok {
+		return false, nil
+	}
+
+	return ls1.Mode == ls2.Mode && ls1.Uid == ls2.Uid && ls1.Gid == ls2.Gid && ls1.Rdev == ls2.Rdev, nil
+}
+
+func compareCapabilities(p1, p2 string) (bool, error) {
+	c1, err := sysx.LGetxattr(p1, "security.capability")
+	if err != nil && err != sysx.ENODATA {
+		return false, errors.Wrapf(err, "failed to get xattr for %s", p1)
+	}
+	c2, err := sysx.LGetxattr(p2, "security.capability")
+	if err != nil && err != sysx.ENODATA {
+		return false, errors.Wrapf(err, "failed to get xattr for %s", p2)
+	}
+	return bytes.Equal(c1, c2), nil
+}
+
+func isLinked(f os.FileInfo) bool {
+	s, ok := f.Sys().(*syscall.Stat_t)
+	if !ok {
+		return false
+	}
+	return !f.IsDir() && s.Nlink > 1
+}
diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/continuity/fs/diff_windows.go b/src/cmd/linuxkit/vendor/github.com/containerd/continuity/fs/diff_windows.go
new file mode 100644
index 000000000..4bfa72d3a
--- /dev/null
+++ b/src/cmd/linuxkit/vendor/github.com/containerd/continuity/fs/diff_windows.go
@@ -0,0 +1,48 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/ + +package fs + +import ( + "os" + + "golang.org/x/sys/windows" +) + +func detectDirDiff(upper, lower string) *diffDirOptions { + return nil +} + +func compareSysStat(s1, s2 interface{}) (bool, error) { + f1, ok := s1.(windows.Win32FileAttributeData) + if !ok { + return false, nil + } + f2, ok := s2.(windows.Win32FileAttributeData) + if !ok { + return false, nil + } + return f1.FileAttributes == f2.FileAttributes, nil +} + +func compareCapabilities(p1, p2 string) (bool, error) { + // TODO: Use windows equivalent + return true, nil +} + +func isLinked(os.FileInfo) bool { + return false +} diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/continuity/fs/dtype_linux.go b/src/cmd/linuxkit/vendor/github.com/containerd/continuity/fs/dtype_linux.go new file mode 100644 index 000000000..10510d8de --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/containerd/continuity/fs/dtype_linux.go @@ -0,0 +1,103 @@ +// +build linux + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package fs + +import ( + "fmt" + "io/ioutil" + "os" + "syscall" + "unsafe" +) + +func locateDummyIfEmpty(path string) (string, error) { + children, err := ioutil.ReadDir(path) + if err != nil { + return "", err + } + if len(children) != 0 { + return "", nil + } + dummyFile, err := ioutil.TempFile(path, "fsutils-dummy") + if err != nil { + return "", err + } + name := dummyFile.Name() + err = dummyFile.Close() + return name, err +} + +// SupportsDType returns whether the filesystem mounted on path supports d_type +func SupportsDType(path string) (bool, error) { + // locate dummy so that we have at least one dirent + dummy, err := locateDummyIfEmpty(path) + if err != nil { + return false, err + } + if dummy != "" { + defer os.Remove(dummy) + } + + visited := 0 + supportsDType := true + fn := func(ent *syscall.Dirent) bool { + visited++ + if ent.Type == syscall.DT_UNKNOWN { + supportsDType = false + // stop iteration + return true + } + // continue iteration + return false + } + if err = iterateReadDir(path, fn); err != nil { + return false, err + } + if visited == 0 { + return false, fmt.Errorf("did not hit any dirent during iteration %s", path) + } + return supportsDType, nil +} + +func iterateReadDir(path string, fn func(*syscall.Dirent) bool) error { + d, err := os.Open(path) + if err != nil { + return err + } + defer d.Close() + fd := int(d.Fd()) + buf := make([]byte, 4096) + for { + nbytes, err := syscall.ReadDirent(fd, buf) + if err != nil { + return err + } + if nbytes == 0 { + break + } + for off := 0; off < nbytes; { + ent := (*syscall.Dirent)(unsafe.Pointer(&buf[off])) + if stop := fn(ent); stop { + return nil + } + off += int(ent.Reclen) + } + } + return nil +} diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/continuity/fs/du.go b/src/cmd/linuxkit/vendor/github.com/containerd/continuity/fs/du.go new file mode 100644 index 000000000..fccc985dc --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/containerd/continuity/fs/du.go @@ -0,0 +1,38 @@ +/* + Copyright The 
containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package fs
+
+import "context"
+
+// Usage holds inode and disk-size usage information
+type Usage struct {
+	Inodes int64
+	Size   int64
+}
+
+// DiskUsage counts the number of inodes and disk usage for the resources under
+// the given roots.
+func DiskUsage(ctx context.Context, roots ...string) (Usage, error) {
+	return diskUsage(ctx, roots...)
+}
+
+// DiffUsage counts the number of inodes and disk usage in the
+// diff between the two directories. The first path is intended
+// as the base directory and the second as the changed directory.
+func DiffUsage(ctx context.Context, a, b string) (Usage, error) {
+	return diffUsage(ctx, a, b)
+}
diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/continuity/fs/du_unix.go b/src/cmd/linuxkit/vendor/github.com/containerd/continuity/fs/du_unix.go
new file mode 100644
index 000000000..e22ffbea3
--- /dev/null
+++ b/src/cmd/linuxkit/vendor/github.com/containerd/continuity/fs/du_unix.go
@@ -0,0 +1,110 @@
+// +build !windows
+
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package fs
+
+import (
+	"context"
+	"os"
+	"path/filepath"
+	"syscall"
+)
+
+type inode struct {
+	// TODO(stevvooe): Can probably reduce memory usage by not tracking
+	// device, but we can leave this as is for now.
+	dev, ino uint64
+}
+
+func newInode(stat *syscall.Stat_t) inode {
+	return inode{
+		// Dev is uint32 on darwin/bsd, uint64 on linux/solaris
+		dev: uint64(stat.Dev), // nolint: unconvert
+		// Ino is uint32 on bsd, uint64 on darwin/linux/solaris
+		ino: uint64(stat.Ino), // nolint: unconvert
+	}
+}
+
+func diskUsage(ctx context.Context, roots ...string) (Usage, error) {
+
+	var (
+		size   int64
+		inodes = map[inode]struct{}{} // expensive!
+	)
+
+	for _, root := range roots {
+		if err := filepath.Walk(root, func(path string, fi os.FileInfo, err error) error {
+			if err != nil {
+				return err
+			}
+
+			select {
+			case <-ctx.Done():
+				return ctx.Err()
+			default:
+			}
+
+			inoKey := newInode(fi.Sys().(*syscall.Stat_t))
+			if _, ok := inodes[inoKey]; !ok {
+				inodes[inoKey] = struct{}{}
+				size += fi.Size()
+			}
+
+			return nil
+		}); err != nil {
+			return Usage{}, err
+		}
+	}
+
+	return Usage{
+		Inodes: int64(len(inodes)),
+		Size:   size,
+	}, nil
+}
+
+func diffUsage(ctx context.Context, a, b string) (Usage, error) {
+	var (
+		size   int64
+		inodes = map[inode]struct{}{} // expensive!
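+		// The map records each (device, inode) pair exactly once, so a
+		// file reachable through multiple hardlinks contributes its
+		// size only a single time.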
+ ) + + if err := Changes(ctx, a, b, func(kind ChangeKind, _ string, fi os.FileInfo, err error) error { + if err != nil { + return err + } + + if kind == ChangeKindAdd || kind == ChangeKindModify { + inoKey := newInode(fi.Sys().(*syscall.Stat_t)) + if _, ok := inodes[inoKey]; !ok { + inodes[inoKey] = struct{}{} + size += fi.Size() + } + + return nil + + } + return nil + }); err != nil { + return Usage{}, err + } + + return Usage{ + Inodes: int64(len(inodes)), + Size: size, + }, nil +} diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/continuity/fs/du_windows.go b/src/cmd/linuxkit/vendor/github.com/containerd/continuity/fs/du_windows.go new file mode 100644 index 000000000..8f25ec59c --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/containerd/continuity/fs/du_windows.go @@ -0,0 +1,82 @@ +// +build windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package fs + +import ( + "context" + "os" + "path/filepath" +) + +func diskUsage(ctx context.Context, roots ...string) (Usage, error) { + var ( + size int64 + ) + + // TODO(stevvooe): Support inodes (or equivalent) for windows. + + for _, root := range roots { + if err := filepath.Walk(root, func(path string, fi os.FileInfo, err error) error { + if err != nil { + return err + } + + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + size += fi.Size() + return nil + }); err != nil { + return Usage{}, err + } + } + + return Usage{ + Size: size, + }, nil +} + +func diffUsage(ctx context.Context, a, b string) (Usage, error) { + var ( + size int64 + ) + + if err := Changes(ctx, a, b, func(kind ChangeKind, _ string, fi os.FileInfo, err error) error { + if err != nil { + return err + } + + if kind == ChangeKindAdd || kind == ChangeKindModify { + size += fi.Size() + + return nil + + } + return nil + }); err != nil { + return Usage{}, err + } + + return Usage{ + Size: size, + }, nil +} diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/continuity/fs/hardlink.go b/src/cmd/linuxkit/vendor/github.com/containerd/continuity/fs/hardlink.go new file mode 100644 index 000000000..762aa45e6 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/containerd/continuity/fs/hardlink.go @@ -0,0 +1,43 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package fs + +import "os" + +// GetLinkInfo returns an identifier representing the node a hardlink is pointing +// to. If the file is not hard linked then 0 will be returned. 
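+//
+// A minimal, hypothetical caller sketch (the inodes map, fi and name
+// are assumptions, not part of this package): deduplicate hardlinked
+// files while walking a tree.
+//
+//	inodes := map[uint64]string{}
+//	if ino, isHardlink := GetLinkInfo(fi); isHardlink {
+//		if first, seen := inodes[ino]; seen {
+//			// name refers to the same inode as first; link it
+//			// instead of copying the content again
+//			_ = first
+//		} else {
+//			inodes[ino] = name
+//		}
+//	}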
+func GetLinkInfo(fi os.FileInfo) (uint64, bool) {
+	return getLinkInfo(fi)
+}
+
+// getLinkSource looks up the link source for the given name and file
+// info in the provided inode map and returns its path. If the file has
+// other links but its inode is not yet in the map, the given name is
+// added to the map as the source for subsequent link locations.
+func getLinkSource(name string, fi os.FileInfo, inodes map[uint64]string) (string, error) {
+	inode, isHardlink := getLinkInfo(fi)
+	if !isHardlink {
+		return "", nil
+	}
+
+	path, ok := inodes[inode]
+	if !ok {
+		inodes[inode] = name
+	}
+	return path, nil
+}
diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/continuity/fs/hardlink_unix.go b/src/cmd/linuxkit/vendor/github.com/containerd/continuity/fs/hardlink_unix.go
new file mode 100644
index 000000000..f95f0904c
--- /dev/null
+++ b/src/cmd/linuxkit/vendor/github.com/containerd/continuity/fs/hardlink_unix.go
@@ -0,0 +1,34 @@
+// +build !windows
+
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package fs
+
+import (
+	"os"
+	"syscall"
+)
+
+func getLinkInfo(fi os.FileInfo) (uint64, bool) {
+	s, ok := fi.Sys().(*syscall.Stat_t)
+	if !ok {
+		return 0, false
+	}
+
+	// Ino is uint32 on bsd, uint64 on darwin/linux/solaris
+	return uint64(s.Ino), !fi.IsDir() && s.Nlink > 1 // nolint: unconvert
+}
diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/continuity/fs/hardlink_windows.go b/src/cmd/linuxkit/vendor/github.com/containerd/continuity/fs/hardlink_windows.go
new file mode 100644
index 000000000..748554714
--- /dev/null
+++ b/src/cmd/linuxkit/vendor/github.com/containerd/continuity/fs/hardlink_windows.go
@@ -0,0 +1,23 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package fs
+
+import "os"
+
+func getLinkInfo(fi os.FileInfo) (uint64, bool) {
+	return 0, false
+}
diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/continuity/fs/path.go b/src/cmd/linuxkit/vendor/github.com/containerd/continuity/fs/path.go
new file mode 100644
index 000000000..8863caa9d
--- /dev/null
+++ b/src/cmd/linuxkit/vendor/github.com/containerd/continuity/fs/path.go
@@ -0,0 +1,313 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package fs
+
+import (
+	"bytes"
+	"context"
+	"io"
+	"os"
+	"path/filepath"
+
+	"github.com/pkg/errors"
+)
+
+var (
+	errTooManyLinks = errors.New("too many links")
+)
+
+type currentPath struct {
+	path     string
+	f        os.FileInfo
+	fullPath string
+}
+
+func pathChange(lower, upper *currentPath) (ChangeKind, string) {
+	if lower == nil {
+		if upper == nil {
+			panic("cannot compare nil paths")
+		}
+		return ChangeKindAdd, upper.path
+	}
+	if upper == nil {
+		return ChangeKindDelete, lower.path
+	}
+
+	switch i := directoryCompare(lower.path, upper.path); {
+	case i < 0:
+		// File in lower that is not in upper
+		return ChangeKindDelete, lower.path
+	case i > 0:
+		// File in upper that is not in lower
+		return ChangeKindAdd, upper.path
+	default:
+		return ChangeKindModify, upper.path
+	}
+}
+
+func directoryCompare(a, b string) int {
+	l := len(a)
+	if len(b) < l {
+		l = len(b)
+	}
+	for i := 0; i < l; i++ {
+		c1, c2 := a[i], b[i]
+		if c1 == filepath.Separator {
+			c1 = byte(0)
+		}
+		if c2 == filepath.Separator {
+			c2 = byte(0)
+		}
+		if c1 < c2 {
+			return -1
+		}
+		if c1 > c2 {
+			return +1
+		}
+	}
+	if len(a) < len(b) {
+		return -1
+	}
+	if len(a) > len(b) {
+		return +1
+	}
+	return 0
+}
+
+func sameFile(f1, f2 *currentPath) (bool, error) {
+	if os.SameFile(f1.f, f2.f) {
+		return true, nil
+	}
+
+	equalStat, err := compareSysStat(f1.f.Sys(), f2.f.Sys())
+	if err != nil || !equalStat {
+		return equalStat, err
+	}
+
+	if eq, err := compareCapabilities(f1.fullPath, f2.fullPath); err != nil || !eq {
+		return eq, err
+	}
+
+	// If not a directory also check size, modtime, and content
+	if !f1.f.IsDir() {
+		if f1.f.Size() != f2.f.Size() {
+			return false, nil
+		}
+		t1 := f1.f.ModTime()
+		t2 := f2.f.ModTime()
+
+		if t1.Unix() != t2.Unix() {
+			return false, nil
+		}
+
+		// If the timestamp may have been truncated in both of the
+		// files, check the content of the files to determine a difference
+		if t1.Nanosecond() == 0 && t2.Nanosecond() == 0 {
+			var eq bool
+			if (f1.f.Mode() & os.ModeSymlink) == os.ModeSymlink {
+				eq, err = compareSymlinkTarget(f1.fullPath, f2.fullPath)
+			} else if f1.f.Size() > 0 {
+				eq, err = compareFileContent(f1.fullPath, f2.fullPath)
+			}
+			if err != nil || !eq {
+				return eq, err
+			}
+		} else if t1.Nanosecond() != t2.Nanosecond() {
+			return false, nil
+		}
+	}
+
+	return true, nil
+}
+
+func compareSymlinkTarget(p1, p2 string) (bool, error) {
+	t1, err := os.Readlink(p1)
+	if err != nil {
+		return false, err
+	}
+	t2, err := os.Readlink(p2)
+	if err != nil {
+		return false, err
+	}
+	return t1 == t2, nil
+}
+
+const compareChuckSize = 32 * 1024
+
+// compareFileContent compares the content of two same-sized files
+// by comparing each byte.
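+// Both files are read in fixed 32KiB chunks; differing byte counts or
+// differing bytes in any chunk report the files as different.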
+func compareFileContent(p1, p2 string) (bool, error) { + f1, err := os.Open(p1) + if err != nil { + return false, err + } + defer f1.Close() + f2, err := os.Open(p2) + if err != nil { + return false, err + } + defer f2.Close() + + b1 := make([]byte, compareChuckSize) + b2 := make([]byte, compareChuckSize) + for { + n1, err1 := f1.Read(b1) + if err1 != nil && err1 != io.EOF { + return false, err1 + } + n2, err2 := f2.Read(b2) + if err2 != nil && err2 != io.EOF { + return false, err2 + } + if n1 != n2 || !bytes.Equal(b1[:n1], b2[:n2]) { + return false, nil + } + if err1 == io.EOF && err2 == io.EOF { + return true, nil + } + } +} + +func pathWalk(ctx context.Context, root string, pathC chan<- *currentPath) error { + return filepath.Walk(root, func(path string, f os.FileInfo, err error) error { + if err != nil { + return err + } + + // Rebase path + path, err = filepath.Rel(root, path) + if err != nil { + return err + } + + path = filepath.Join(string(os.PathSeparator), path) + + // Skip root + if path == string(os.PathSeparator) { + return nil + } + + p := ¤tPath{ + path: path, + f: f, + fullPath: filepath.Join(root, path), + } + + select { + case <-ctx.Done(): + return ctx.Err() + case pathC <- p: + return nil + } + }) +} + +func nextPath(ctx context.Context, pathC <-chan *currentPath) (*currentPath, error) { + select { + case <-ctx.Done(): + return nil, ctx.Err() + case p := <-pathC: + return p, nil + } +} + +// RootPath joins a path with a root, evaluating and bounding any +// symlink to the root directory. +func RootPath(root, path string) (string, error) { + if path == "" { + return root, nil + } + var linksWalked int // to protect against cycles + for { + i := linksWalked + newpath, err := walkLinks(root, path, &linksWalked) + if err != nil { + return "", err + } + path = newpath + if i == linksWalked { + newpath = filepath.Join("/", newpath) + if path == newpath { + return filepath.Join(root, newpath), nil + } + path = newpath + } + } +} + +func walkLink(root, path string, linksWalked *int) (newpath string, islink bool, err error) { + if *linksWalked > 255 { + return "", false, errTooManyLinks + } + + path = filepath.Join("/", path) + if path == "/" { + return path, false, nil + } + realPath := filepath.Join(root, path) + + fi, err := os.Lstat(realPath) + if err != nil { + // If path does not yet exist, treat as non-symlink + if os.IsNotExist(err) { + return path, false, nil + } + return "", false, err + } + if fi.Mode()&os.ModeSymlink == 0 { + return path, false, nil + } + newpath, err = os.Readlink(realPath) + if err != nil { + return "", false, err + } + *linksWalked++ + return newpath, true, nil +} + +func walkLinks(root, path string, linksWalked *int) (string, error) { + switch dir, file := filepath.Split(path); { + case dir == "": + newpath, _, err := walkLink(root, file, linksWalked) + return newpath, err + case file == "": + if os.IsPathSeparator(dir[len(dir)-1]) { + if dir == "/" { + return dir, nil + } + return walkLinks(root, dir[:len(dir)-1], linksWalked) + } + newpath, _, err := walkLink(root, dir, linksWalked) + return newpath, err + default: + newdir, err := walkLinks(root, dir, linksWalked) + if err != nil { + return "", err + } + newpath, islink, err := walkLink(root, filepath.Join(newdir, file), linksWalked) + if err != nil { + return "", err + } + if !islink { + return newpath, nil + } + if filepath.IsAbs(newpath) { + return newpath, nil + } + return filepath.Join(newdir, newpath), nil + } +} diff --git 
a/src/cmd/linuxkit/vendor/github.com/containerd/continuity/fs/stat_darwinfreebsd.go b/src/cmd/linuxkit/vendor/github.com/containerd/continuity/fs/stat_darwinfreebsd.go
new file mode 100644
index 000000000..cb7400a33
--- /dev/null
+++ b/src/cmd/linuxkit/vendor/github.com/containerd/continuity/fs/stat_darwinfreebsd.go
@@ -0,0 +1,44 @@
+// +build darwin freebsd
+
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package fs
+
+import (
+	"syscall"
+	"time"
+)
+
+// StatAtime returns the access time from a stat struct
+func StatAtime(st *syscall.Stat_t) syscall.Timespec {
+	return st.Atimespec
+}
+
+// StatCtime returns the status change time from a stat struct
+func StatCtime(st *syscall.Stat_t) syscall.Timespec {
+	return st.Ctimespec
+}
+
+// StatMtime returns the modified time from a stat struct
+func StatMtime(st *syscall.Stat_t) syscall.Timespec {
+	return st.Mtimespec
+}
+
+// StatATimeAsTime returns the access time as a time.Time
+func StatATimeAsTime(st *syscall.Stat_t) time.Time {
+	return time.Unix(int64(st.Atimespec.Sec), int64(st.Atimespec.Nsec)) // nolint: unconvert
+}
diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/continuity/fs/stat_linuxopenbsd.go b/src/cmd/linuxkit/vendor/github.com/containerd/continuity/fs/stat_linuxopenbsd.go
new file mode 100644
index 000000000..c68df6e58
--- /dev/null
+++ b/src/cmd/linuxkit/vendor/github.com/containerd/continuity/fs/stat_linuxopenbsd.go
@@ -0,0 +1,45 @@
+// +build linux openbsd
+
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package fs
+
+import (
+	"syscall"
+	"time"
+)
+
+// StatAtime returns the Atim
+func StatAtime(st *syscall.Stat_t) syscall.Timespec {
+	return st.Atim
+}
+
+// StatCtime returns the Ctim
+func StatCtime(st *syscall.Stat_t) syscall.Timespec {
+	return st.Ctim
+}
+
+// StatMtime returns the Mtim
+func StatMtime(st *syscall.Stat_t) syscall.Timespec {
+	return st.Mtim
+}
+
+// StatATimeAsTime returns st.Atim as a time.Time
+func StatATimeAsTime(st *syscall.Stat_t) time.Time {
+	// The int64 conversions ensure the line compiles for 32-bit systems as well.
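+	// (On linux/386, for example, Timespec.Sec and Timespec.Nsec are
+	// int32, whereas on linux/amd64 they are already int64 and the
+	// conversions are no-ops, hence the nolint directive.)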
+	return time.Unix(int64(st.Atim.Sec), int64(st.Atim.Nsec)) // nolint: unconvert
+}
diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/continuity/fs/time.go b/src/cmd/linuxkit/vendor/github.com/containerd/continuity/fs/time.go
new file mode 100644
index 000000000..cde456123
--- /dev/null
+++ b/src/cmd/linuxkit/vendor/github.com/containerd/continuity/fs/time.go
@@ -0,0 +1,29 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package fs
+
+import "time"
+
+// Gnu tar and the go tar writer don't have sub-second mtime precision,
+// which is problematic when we apply changes via tar files. We handle
+// this by comparing for exact times, *or* the same second count and
+// either a or b having exactly 0 nanoseconds.
+func sameFsTime(a, b time.Time) bool {
+	return a == b ||
+		(a.Unix() == b.Unix() &&
+			(a.Nanosecond() == 0 || b.Nanosecond() == 0))
+}
diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/continuity/go.mod b/src/cmd/linuxkit/vendor/github.com/containerd/continuity/go.mod
new file mode 100644
index 000000000..86a7f148c
--- /dev/null
+++ b/src/cmd/linuxkit/vendor/github.com/containerd/continuity/go.mod
@@ -0,0 +1,23 @@
+module github.com/containerd/continuity
+
+go 1.11
+
+require (
+	bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898
+	github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4
+	github.com/golang/protobuf v1.2.0
+	github.com/inconshreveable/mousetrap v1.0.0 // indirect
+	github.com/onsi/ginkgo v1.10.1 // indirect
+	github.com/onsi/gomega v1.7.0 // indirect
+	github.com/opencontainers/go-digest v1.0.0-rc1
+	github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7
+	github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2
+	github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee
+	github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95 // indirect
+	github.com/stretchr/testify v1.4.0 // indirect
+	golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3 // indirect
+	golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f
+	golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e
+	gopkg.in/airbrake/gobrake.v2 v2.0.9 // indirect
+	gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2 // indirect
+)
diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/continuity/pathdriver/path_driver.go b/src/cmd/linuxkit/vendor/github.com/containerd/continuity/pathdriver/path_driver.go
index b43d55fe9..b0d5a6b56 100644
--- a/src/cmd/linuxkit/vendor/github.com/containerd/continuity/pathdriver/path_driver.go
+++ b/src/cmd/linuxkit/vendor/github.com/containerd/continuity/pathdriver/path_driver.go
@@ -1,3 +1,19 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package pathdriver import ( diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/continuity/syscallx/syscall_unix.go b/src/cmd/linuxkit/vendor/github.com/containerd/continuity/syscallx/syscall_unix.go new file mode 100644 index 000000000..0bfa6a040 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/containerd/continuity/syscallx/syscall_unix.go @@ -0,0 +1,26 @@ +// +build !windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package syscallx + +import "syscall" + +// Readlink returns the destination of the named symbolic link. +func Readlink(path string, buf []byte) (n int, err error) { + return syscall.Readlink(path, buf) +} diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/continuity/syscallx/syscall_windows.go b/src/cmd/linuxkit/vendor/github.com/containerd/continuity/syscallx/syscall_windows.go new file mode 100644 index 000000000..2ba814990 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/containerd/continuity/syscallx/syscall_windows.go @@ -0,0 +1,112 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package syscallx + +import ( + "syscall" + "unsafe" +) + +type reparseDataBuffer struct { + ReparseTag uint32 + ReparseDataLength uint16 + Reserved uint16 + + // GenericReparseBuffer + reparseBuffer byte +} + +type mountPointReparseBuffer struct { + SubstituteNameOffset uint16 + SubstituteNameLength uint16 + PrintNameOffset uint16 + PrintNameLength uint16 + PathBuffer [1]uint16 +} + +type symbolicLinkReparseBuffer struct { + SubstituteNameOffset uint16 + SubstituteNameLength uint16 + PrintNameOffset uint16 + PrintNameLength uint16 + Flags uint32 + PathBuffer [1]uint16 +} + +const ( + _IO_REPARSE_TAG_MOUNT_POINT = 0xA0000003 + _SYMLINK_FLAG_RELATIVE = 1 +) + +// Readlink returns the destination of the named symbolic link. 
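+// It resolves both symbolic link and mount point (junction) reparse
+// points, rewriting NT namespace targets such as \??\C:\foo\bar to
+// C:\foo\bar.
+//
+// A hypothetical call sketch (the link path below is an assumption):
+//
+//	buf := make([]byte, syscall.MAX_PATH)
+//	n, err := Readlink(`C:\some\link`, buf)
+//	if err == nil {
+//		target := string(buf[:n])
+//		_ = target
+//	}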
+func Readlink(path string, buf []byte) (n int, err error) { + fd, err := syscall.CreateFile(syscall.StringToUTF16Ptr(path), syscall.GENERIC_READ, 0, nil, syscall.OPEN_EXISTING, + syscall.FILE_FLAG_OPEN_REPARSE_POINT|syscall.FILE_FLAG_BACKUP_SEMANTICS, 0) + if err != nil { + return -1, err + } + defer syscall.CloseHandle(fd) + + rdbbuf := make([]byte, syscall.MAXIMUM_REPARSE_DATA_BUFFER_SIZE) + var bytesReturned uint32 + err = syscall.DeviceIoControl(fd, syscall.FSCTL_GET_REPARSE_POINT, nil, 0, &rdbbuf[0], uint32(len(rdbbuf)), &bytesReturned, nil) + if err != nil { + return -1, err + } + + rdb := (*reparseDataBuffer)(unsafe.Pointer(&rdbbuf[0])) + var s string + switch rdb.ReparseTag { + case syscall.IO_REPARSE_TAG_SYMLINK: + data := (*symbolicLinkReparseBuffer)(unsafe.Pointer(&rdb.reparseBuffer)) + p := (*[0xffff]uint16)(unsafe.Pointer(&data.PathBuffer[0])) + s = syscall.UTF16ToString(p[data.SubstituteNameOffset/2 : (data.SubstituteNameOffset+data.SubstituteNameLength)/2]) + if data.Flags&_SYMLINK_FLAG_RELATIVE == 0 { + if len(s) >= 4 && s[:4] == `\??\` { + s = s[4:] + switch { + case len(s) >= 2 && s[1] == ':': // \??\C:\foo\bar + // do nothing + case len(s) >= 4 && s[:4] == `UNC\`: // \??\UNC\foo\bar + s = `\\` + s[4:] + default: + // unexpected; do nothing + } + } else { + // unexpected; do nothing + } + } + case _IO_REPARSE_TAG_MOUNT_POINT: + data := (*mountPointReparseBuffer)(unsafe.Pointer(&rdb.reparseBuffer)) + p := (*[0xffff]uint16)(unsafe.Pointer(&data.PathBuffer[0])) + s = syscall.UTF16ToString(p[data.SubstituteNameOffset/2 : (data.SubstituteNameOffset+data.SubstituteNameLength)/2]) + if len(s) >= 4 && s[:4] == `\??\` { // \??\C:\foo\bar + if len(s) < 48 || s[:11] != `\??\Volume{` { + s = s[4:] + } + } else { + // unexpected; do nothing + } + default: + // the path is not a symlink or junction but another type of reparse + // point + return -1, syscall.ENOENT + } + n = copy(buf, []byte(s)) + + return n, nil +} diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/continuity/sysx/README.md b/src/cmd/linuxkit/vendor/github.com/containerd/continuity/sysx/README.md new file mode 100644 index 000000000..ad7aee533 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/containerd/continuity/sysx/README.md @@ -0,0 +1,3 @@ +This package is for internal use only. It is intended to only have +temporary changes before they are upstreamed to golang.org/x/sys/ +(a.k.a. https://github.com/golang/sys). diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/continuity/sysx/file_posix.go b/src/cmd/linuxkit/vendor/github.com/containerd/continuity/sysx/file_posix.go new file mode 100644 index 000000000..e28f3a1b5 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/containerd/continuity/sysx/file_posix.go @@ -0,0 +1,128 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package sysx + +import ( + "os" + "path/filepath" + + "github.com/containerd/continuity/syscallx" +) + +// Readlink returns the destination of the named symbolic link. 
+// If there is an error, it will be of type *PathError.
+func Readlink(name string) (string, error) {
+	for len := 128; ; len *= 2 {
+		b := make([]byte, len)
+		n, e := fixCount(syscallx.Readlink(fixLongPath(name), b))
+		if e != nil {
+			return "", &os.PathError{Op: "readlink", Path: name, Err: e}
+		}
+		if n < len {
+			return string(b[0:n]), nil
+		}
+	}
+}
+
+// Many functions in package syscall return a count of -1 instead of 0.
+// Using fixCount(call()) instead of call() corrects the count.
+func fixCount(n int, err error) (int, error) {
+	if n < 0 {
+		n = 0
+	}
+	return n, err
+}
+
+// fixLongPath returns the extended-length (\\?\-prefixed) form of
+// path when needed, in order to avoid the default 260 character file
+// path limit imposed by Windows. If path is not easily converted to
+// the extended-length form (for example, if path is a relative path
+// or contains .. elements), or is short enough, fixLongPath returns
+// path unmodified.
+//
+// See https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx#maxpath
+func fixLongPath(path string) string {
+	// Do nothing (and don't allocate) if the path is "short".
+	// Empirically (at least on the Windows Server 2013 builder),
+	// the kernel is arbitrarily okay with < 248 bytes. That
+	// matches what the docs above say:
+	// "When using an API to create a directory, the specified
+	// path cannot be so long that you cannot append an 8.3 file
+	// name (that is, the directory name cannot exceed MAX_PATH
+	// minus 12)." Since MAX_PATH is 260, 260 - 12 = 248.
+	//
+	// The MSDN docs appear to say that a normal path that is 248 bytes long
+	// will work; empirically the path must be less than 248 bytes long.
+	if len(path) < 248 {
+		// Don't fix. (This is how Go 1.7 and earlier worked,
+		// not automatically generating the \\?\ form)
+		return path
+	}
+
+	// The extended form begins with \\?\, as in
+	// \\?\c:\windows\foo.txt or \\?\UNC\server\share\foo.txt.
+	// The extended form disables evaluation of . and .. path
+	// elements and disables the interpretation of / as equivalent
+	// to \. The conversion here rewrites / to \ and elides
+	// . elements as well as trailing or duplicate separators. For
+	// simplicity it avoids the conversion entirely for relative
+	// paths or paths containing .. elements. For now,
+	// \\server\share paths are not converted to
+	// \\?\UNC\server\share paths because the rules for doing so
+	// are less well-specified.
+	if len(path) >= 2 && path[:2] == `\\` {
+		// Don't canonicalize UNC paths.
+		return path
+	}
+	if !filepath.IsAbs(path) {
+		// Relative path
+		return path
+	}
+
+	const prefix = `\\?`
+
+	pathbuf := make([]byte, len(prefix)+len(path)+len(`\`))
+	copy(pathbuf, prefix)
+	n := len(path)
+	r, w := 0, len(prefix)
+	for r < n {
+		switch {
+		case os.IsPathSeparator(path[r]):
+			// empty block
+			r++
+		case path[r] == '.' && (r+1 == n || os.IsPathSeparator(path[r+1])):
+			// /./
+			r++
+		case r+1 < n && path[r] == '.' && path[r+1] == '.' && (r+2 == n || os.IsPathSeparator(path[r+2])):
+			// /../ is currently unhandled
+			return path
+		default:
+			pathbuf[w] = '\\'
+			w++
+			for ; r < n && !os.IsPathSeparator(path[r]); r++ {
+				pathbuf[w] = path[r]
+				w++
+			}
+		}
+	}
+	// A drive's root directory needs a trailing \
+	if w == len(`\\?\c:`) {
+		pathbuf[w] = '\\'
+		w++
+	}
+	return string(pathbuf[:w])
+}
diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/continuity/sysx/nodata_linux.go b/src/cmd/linuxkit/vendor/github.com/containerd/continuity/sysx/nodata_linux.go
new file mode 100644
index 000000000..28ce5d8de
--- /dev/null
+++ b/src/cmd/linuxkit/vendor/github.com/containerd/continuity/sysx/nodata_linux.go
@@ -0,0 +1,23 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package sysx
+
+import (
+	"syscall"
+)
+
+const ENODATA = syscall.ENODATA
diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/continuity/sysx/nodata_solaris.go b/src/cmd/linuxkit/vendor/github.com/containerd/continuity/sysx/nodata_solaris.go
new file mode 100644
index 000000000..e0575f446
--- /dev/null
+++ b/src/cmd/linuxkit/vendor/github.com/containerd/continuity/sysx/nodata_solaris.go
@@ -0,0 +1,24 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package sysx
+
+import (
+	"syscall"
+)
+
+// This should actually be a set that contains ENOENT and EPERM
+const ENODATA = syscall.ENOENT
diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/continuity/sysx/nodata_unix.go b/src/cmd/linuxkit/vendor/github.com/containerd/continuity/sysx/nodata_unix.go
new file mode 100644
index 000000000..de4b3d50c
--- /dev/null
+++ b/src/cmd/linuxkit/vendor/github.com/containerd/continuity/sysx/nodata_unix.go
@@ -0,0 +1,25 @@
+// +build darwin freebsd openbsd
+
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/ + +package sysx + +import ( + "syscall" +) + +const ENODATA = syscall.ENOATTR diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/continuity/sysx/xattr.go b/src/cmd/linuxkit/vendor/github.com/containerd/continuity/sysx/xattr.go new file mode 100644 index 000000000..db6fe70fe --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/containerd/continuity/sysx/xattr.go @@ -0,0 +1,117 @@ +// +build linux darwin + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package sysx + +import ( + "bytes" + + "golang.org/x/sys/unix" +) + +// Listxattr calls syscall listxattr and reads all content +// and returns a string array +func Listxattr(path string) ([]string, error) { + return listxattrAll(path, unix.Listxattr) +} + +// Removexattr calls syscall removexattr +func Removexattr(path string, attr string) (err error) { + return unix.Removexattr(path, attr) +} + +// Setxattr calls syscall setxattr +func Setxattr(path string, attr string, data []byte, flags int) (err error) { + return unix.Setxattr(path, attr, data, flags) +} + +// Getxattr calls syscall getxattr +func Getxattr(path, attr string) ([]byte, error) { + return getxattrAll(path, attr, unix.Getxattr) +} + +// LListxattr lists xattrs, not following symlinks +func LListxattr(path string) ([]string, error) { + return listxattrAll(path, unix.Llistxattr) +} + +// LRemovexattr removes an xattr, not following symlinks +func LRemovexattr(path string, attr string) (err error) { + return unix.Lremovexattr(path, attr) +} + +// LSetxattr sets an xattr, not following symlinks +func LSetxattr(path string, attr string, data []byte, flags int) (err error) { + return unix.Lsetxattr(path, attr, data, flags) +} + +// LGetxattr gets an xattr, not following symlinks +func LGetxattr(path, attr string) ([]byte, error) { + return getxattrAll(path, attr, unix.Lgetxattr) +} + +const defaultXattrBufferSize = 128 + +type listxattrFunc func(path string, dest []byte) (int, error) + +func listxattrAll(path string, listFunc listxattrFunc) ([]string, error) { + buf := make([]byte, defaultXattrBufferSize) + n, err := listFunc(path, buf) + for err == unix.ERANGE { + // Buffer too small, use zero-sized buffer to get the actual size + n, err = listFunc(path, []byte{}) + if err != nil { + return nil, err + } + buf = make([]byte, n) + n, err = listFunc(path, buf) + } + if err != nil { + return nil, err + } + + ps := bytes.Split(bytes.TrimSuffix(buf[:n], []byte{0}), []byte{0}) + var entries []string + for _, p := range ps { + if len(p) > 0 { + entries = append(entries, string(p)) + } + } + + return entries, nil +} + +type getxattrFunc func(string, string, []byte) (int, error) + +func getxattrAll(path, attr string, getFunc getxattrFunc) ([]byte, error) { + buf := make([]byte, defaultXattrBufferSize) + n, err := getFunc(path, attr, buf) + for err == unix.ERANGE { + // Buffer too small, use zero-sized buffer to get the actual size + n, err = getFunc(path, attr, []byte{}) + if err != nil { + return nil, err + } + buf = make([]byte, n) + n, err = 
getFunc(path, attr, buf) + } + if err != nil { + return nil, err + } + return buf[:n], nil +} diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/continuity/sysx/xattr_unsupported.go b/src/cmd/linuxkit/vendor/github.com/containerd/continuity/sysx/xattr_unsupported.go new file mode 100644 index 000000000..c9ef3a1d2 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/containerd/continuity/sysx/xattr_unsupported.go @@ -0,0 +1,67 @@ +// +build !linux,!darwin + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package sysx + +import ( + "errors" + "runtime" +) + +var unsupported = errors.New("extended attributes unsupported on " + runtime.GOOS) + +// Listxattr calls syscall listxattr and reads all content +// and returns a string array +func Listxattr(path string) ([]string, error) { + return []string{}, nil +} + +// Removexattr calls syscall removexattr +func Removexattr(path string, attr string) (err error) { + return unsupported +} + +// Setxattr calls syscall setxattr +func Setxattr(path string, attr string, data []byte, flags int) (err error) { + return unsupported +} + +// Getxattr calls syscall getxattr +func Getxattr(path, attr string) ([]byte, error) { + return []byte{}, unsupported +} + +// LListxattr lists xattrs, not following symlinks +func LListxattr(path string) ([]string, error) { + return []string{}, nil +} + +// LRemovexattr removes an xattr, not following symlinks +func LRemovexattr(path string, attr string) (err error) { + return unsupported +} + +// LSetxattr sets an xattr, not following symlinks +func LSetxattr(path string, attr string, data []byte, flags int) (err error) { + return unsupported +} + +// LGetxattr gets an xattr, not following symlinks +func LGetxattr(path, attr string) ([]byte, error) { + return []byte{}, nil +} diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/continuity/vendor.conf b/src/cmd/linuxkit/vendor/github.com/containerd/continuity/vendor.conf deleted file mode 100644 index 7c80deec5..000000000 --- a/src/cmd/linuxkit/vendor/github.com/containerd/continuity/vendor.conf +++ /dev/null @@ -1,13 +0,0 @@ -bazil.org/fuse 371fbbdaa8987b715bdd21d6adc4c9b20155f748 -github.com/dustin/go-humanize bb3d318650d48840a39aa21a027c6630e198e626 -github.com/golang/protobuf 1e59b77b52bf8e4b449a57e6f79f21226d571845 -github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75 -github.com/opencontainers/go-digest 279bed98673dd5bef374d3b6e4b09e2af76183bf -github.com/pkg/errors f15c970de5b76fac0b59abb32d62c17cc7bed265 -github.com/sirupsen/logrus 89742aefa4b206dcf400792f3bd35b542998eb3b -github.com/spf13/cobra 2da4a54c5ceefcee7ca5dd0eea1e18a3b6366489 -github.com/spf13/pflag 4c012f6dcd9546820e378d0bdda4d8fc772cdfea -golang.org/x/crypto 9f005a07e0d31d45e6656d241bb5c0f2efd4bc94 -golang.org/x/net a337091b0525af65de94df2eb7e98bd9962dcbe2 -golang.org/x/sync 450f422ab23cf9881c94e2db30cac0eb1b7cf80c -golang.org/x/sys 665f6529cca930e27b831a0d1dafffbe1c172924 diff --git 
a/src/cmd/linuxkit/vendor/github.com/coreos/go-systemd/LICENSE b/src/cmd/linuxkit/vendor/github.com/coreos/go-systemd/LICENSE new file mode 100644 index 000000000..37ec93a14 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/coreos/go-systemd/LICENSE @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. 
+ +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. + +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. 
+ +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. + +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/src/cmd/linuxkit/vendor/github.com/coreos/go-systemd/NOTICE b/src/cmd/linuxkit/vendor/github.com/coreos/go-systemd/NOTICE new file mode 100644 index 000000000..23a0ada2f --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/coreos/go-systemd/NOTICE @@ -0,0 +1,5 @@ +CoreOS Project +Copyright 2018 CoreOS, Inc + +This product includes software developed at CoreOS, Inc. +(http://www.coreos.com/). diff --git a/src/cmd/linuxkit/vendor/github.com/coreos/go-systemd/README.md b/src/cmd/linuxkit/vendor/github.com/coreos/go-systemd/README.md new file mode 100644 index 000000000..b83d0683c --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/coreos/go-systemd/README.md @@ -0,0 +1,71 @@ +# go-systemd + +[![Build Status](https://travis-ci.org/coreos/go-systemd.png?branch=master)](https://travis-ci.org/coreos/go-systemd) +[![godoc](https://godoc.org/github.com/coreos/go-systemd?status.svg)](http://godoc.org/github.com/coreos/go-systemd) +![minimum golang 1.12](https://img.shields.io/badge/golang-1.12%2B-orange.svg) + + +Go bindings to systemd. The project has several packages: + +- `activation` - for writing and using socket activation from Go +- `daemon` - for notifying systemd of service status changes +- `dbus` - for starting/stopping/inspecting running services and units +- `journal` - for writing to systemd's logging service, journald +- `sdjournal` - for reading from journald by wrapping its C API +- `login1` - for integration with the systemd logind API +- `machine1` - for registering machines/containers with systemd +- `unit` - for (de)serialization and comparison of unit files + +## Socket Activation + +An example HTTP server using socket activation can be quickly set up by following this README on a Linux machine running systemd: + +https://github.com/coreos/go-systemd/tree/master/examples/activation/httpserver + +## systemd Service Notification + +The `daemon` package is an implementation of the [sd_notify protocol](https://www.freedesktop.org/software/systemd/man/sd_notify.html#Description). It can be used to inform systemd of service start-up completion, watchdog events, and other status changes. + +## D-Bus + +The `dbus` package connects to the [systemd D-Bus API](http://www.freedesktop.org/wiki/Software/systemd/dbus/) and lets you start, stop and introspect systemd units. The API docs are here: + +http://godoc.org/github.com/coreos/go-systemd/dbus + +### Debugging + +Create `/etc/dbus-1/system-local.conf` that looks like this: + +``` +<!DOCTYPE busconfig PUBLIC "-//freedesktop//DTD D-BUS Bus Configuration 1.0//EN" +"http://www.freedesktop.org/standards/dbus/1.0/busconfig.dtd"> +<busconfig> + <policy user="root"> + <allow eavesdrop="true"/> + <allow eavesdrop="true" send_destination="*"/> + </policy> +</busconfig> +``` + +## Journal + +### Writing to the Journal + +Using the pure-Go `journal` package you can submit journal entries directly to systemd's journal, taking advantage of features like indexed key/value pairs for each log entry. + +### Reading from the Journal + +The `sdjournal` package provides read access to the journal by wrapping around journald's native C API; consequently it requires cgo and the journal headers to be available. + +## logind + +The `login1` package provides functions to integrate with the [systemd logind API](http://www.freedesktop.org/wiki/Software/systemd/logind/).
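As a quick orientation for reviewers, here is a minimal, illustrative sketch (not from upstream, and not part of the vendored README) of driving the `dbus` package vendored by this patch: connect, enqueue a start job, and wait for its result. `nginx.service` is a placeholder unit name, and the program assumes it runs on a Linux host with systemd and access to the system bus.

```go
package main

import (
	"fmt"
	"log"

	"github.com/coreos/go-systemd/v22/dbus"
)

func main() {
	// New connects to the system bus (or directly to systemd when running as root).
	conn, err := dbus.New()
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// StartUnit enqueues a start job; the result string ("done", "failed",
	// "timeout", ...) is delivered on the channel when the job completes.
	result := make(chan string, 1)
	if _, err := conn.StartUnit("nginx.service", "replace", result); err != nil {
		log.Fatal(err)
	}
	fmt.Println("job result:", <-result)
}
```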
+ +## machined + +The `machine1` package allows interaction with the [systemd machined D-Bus API](http://www.freedesktop.org/wiki/Software/systemd/machined/). + +## Units + +The `unit` package provides various functions for working with [systemd unit files](http://www.freedesktop.org/software/systemd/man/systemd.unit.html). diff --git a/src/cmd/linuxkit/vendor/github.com/coreos/go-systemd/go.mod b/src/cmd/linuxkit/vendor/github.com/coreos/go-systemd/go.mod new file mode 100644 index 000000000..6112fb0bd --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/coreos/go-systemd/go.mod @@ -0,0 +1,5 @@ +module github.com/coreos/go-systemd/v22 + +go 1.12 + +require github.com/godbus/dbus/v5 v5.0.3 diff --git a/src/cmd/linuxkit/vendor/github.com/coreos/go-systemd/v22/LICENSE b/src/cmd/linuxkit/vendor/github.com/coreos/go-systemd/v22/LICENSE new file mode 100644 index 000000000..37ec93a14 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/coreos/go-systemd/v22/LICENSE @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. 
For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. + +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. 
You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. + +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/src/cmd/linuxkit/vendor/github.com/coreos/go-systemd/v22/NOTICE b/src/cmd/linuxkit/vendor/github.com/coreos/go-systemd/v22/NOTICE new file mode 100644 index 000000000..23a0ada2f --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/coreos/go-systemd/v22/NOTICE @@ -0,0 +1,5 @@ +CoreOS Project +Copyright 2018 CoreOS, Inc + +This product includes software developed at CoreOS, Inc. +(http://www.coreos.com/). diff --git a/src/cmd/linuxkit/vendor/github.com/coreos/go-systemd/v22/README.md b/src/cmd/linuxkit/vendor/github.com/coreos/go-systemd/v22/README.md new file mode 100644 index 000000000..b83d0683c --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/coreos/go-systemd/v22/README.md @@ -0,0 +1,71 @@ +# go-systemd + +[![Build Status](https://travis-ci.org/coreos/go-systemd.png?branch=master)](https://travis-ci.org/coreos/go-systemd) +[![godoc](https://godoc.org/github.com/coreos/go-systemd?status.svg)](http://godoc.org/github.com/coreos/go-systemd) +![minimum golang 1.12](https://img.shields.io/badge/golang-1.12%2B-orange.svg) + + +Go bindings to systemd. The project has several packages: + +- `activation` - for writing and using socket activation from Go +- `daemon` - for notifying systemd of service status changes +- `dbus` - for starting/stopping/inspecting running services and units +- `journal` - for writing to systemd's logging service, journald +- `sdjournal` - for reading from journald by wrapping its C API +- `login1` - for integration with the systemd logind API +- `machine1` - for registering machines/containers with systemd +- `unit` - for (de)serialization and comparison of unit files + +## Socket Activation + +An example HTTP server using socket activation can be quickly set up by following this README on a Linux machine running systemd: + +https://github.com/coreos/go-systemd/tree/master/examples/activation/httpserver + +## systemd Service Notification + +The `daemon` package is an implementation of the [sd_notify protocol](https://www.freedesktop.org/software/systemd/man/sd_notify.html#Description). It can be used to inform systemd of service start-up completion, watchdog events, and other status changes. + +## D-Bus + +The `dbus` package connects to the [systemd D-Bus API](http://www.freedesktop.org/wiki/Software/systemd/dbus/) and lets you start, stop and introspect systemd units. 
The API docs are here: + +http://godoc.org/github.com/coreos/go-systemd/dbus + +### Debugging + +Create `/etc/dbus-1/system-local.conf` that looks like this: + +``` +<!DOCTYPE busconfig PUBLIC "-//freedesktop//DTD D-BUS Bus Configuration 1.0//EN" +"http://www.freedesktop.org/standards/dbus/1.0/busconfig.dtd"> +<busconfig> + <policy user="root"> + <allow eavesdrop="true"/> + <allow eavesdrop="true" send_destination="*"/> + </policy> +</busconfig> +``` + +## Journal + +### Writing to the Journal + +Using the pure-Go `journal` package you can submit journal entries directly to systemd's journal, taking advantage of features like indexed key/value pairs for each log entry. + +### Reading from the Journal + +The `sdjournal` package provides read access to the journal by wrapping around journald's native C API; consequently it requires cgo and the journal headers to be available. + +## logind + +The `login1` package provides functions to integrate with the [systemd logind API](http://www.freedesktop.org/wiki/Software/systemd/logind/). + +## machined + +The `machine1` package allows interaction with the [systemd machined D-Bus API](http://www.freedesktop.org/wiki/Software/systemd/machined/). + +## Units + +The `unit` package provides various functions for working with [systemd unit files](http://www.freedesktop.org/software/systemd/man/systemd.unit.html). diff --git a/src/cmd/linuxkit/vendor/github.com/coreos/go-systemd/v22/dbus/dbus.go b/src/cmd/linuxkit/vendor/github.com/coreos/go-systemd/v22/dbus/dbus.go new file mode 100644 index 000000000..91584a166 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/coreos/go-systemd/v22/dbus/dbus.go @@ -0,0 +1,240 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Integration with the systemd D-Bus API. See http://www.freedesktop.org/wiki/Software/systemd/dbus/ +package dbus + +import ( + "encoding/hex" + "fmt" + "os" + "strconv" + "strings" + "sync" + + "github.com/godbus/dbus/v5" +) + +const ( + alpha = `abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ` + num = `0123456789` + alphanum = alpha + num + signalBuffer = 100 +) + +// needsEscape checks whether a byte in a potential dbus ObjectPath needs to be escaped +func needsEscape(i int, b byte) bool { + // Escape everything that is not a-z-A-Z-0-9 + // Also escape 0-9 if it's the first character + return strings.IndexByte(alphanum, b) == -1 || + (i == 0 && strings.IndexByte(num, b) != -1) +} + +// PathBusEscape sanitizes a constituent string of a dbus ObjectPath using the +// rules that systemd uses for serializing special characters. +func PathBusEscape(path string) string { + // Special case the empty string + if len(path) == 0 { + return "_" + } + n := []byte{} + for i := 0; i < len(path); i++ { + c := path[i] + if needsEscape(i, c) { + e := fmt.Sprintf("_%x", c) + n = append(n, []byte(e)...) + } else { + n = append(n, c) + } + } + return string(n) +} + +// pathBusUnescape is the inverse of PathBusEscape. +func pathBusUnescape(path string) string { + if path == "_" { + return "" + } + n := []byte{} + for i := 0; i < len(path); i++ { + c := path[i] + if c == '_' && i+2 < len(path) { + res, err := hex.DecodeString(path[i+1 : i+3]) + if err == nil { + n = append(n, res...)
+ } + i += 2 + } else { + n = append(n, c) + } + } + return string(n) +} + +// Conn is a connection to systemd's dbus endpoint. +type Conn struct { + // sysconn/sysobj are only used to call dbus methods + sysconn *dbus.Conn + sysobj dbus.BusObject + + // sigconn/sigobj are only used to receive dbus signals + sigconn *dbus.Conn + sigobj dbus.BusObject + + jobListener struct { + jobs map[dbus.ObjectPath]chan<- string + sync.Mutex + } + subStateSubscriber struct { + updateCh chan<- *SubStateUpdate + errCh chan<- error + sync.Mutex + ignore map[dbus.ObjectPath]int64 + cleanIgnore int64 + } + propertiesSubscriber struct { + updateCh chan<- *PropertiesUpdate + errCh chan<- error + sync.Mutex + } +} + +// New establishes a connection to any available bus and authenticates. +// Callers should call Close() when done with the connection. +func New() (*Conn, error) { + conn, err := NewSystemConnection() + if err != nil && os.Geteuid() == 0 { + return NewSystemdConnection() + } + return conn, err +} + +// NewSystemConnection establishes a connection to the system bus and authenticates. +// Callers should call Close() when done with the connection +func NewSystemConnection() (*Conn, error) { + return NewConnection(func() (*dbus.Conn, error) { + return dbusAuthHelloConnection(dbus.SystemBusPrivate) + }) +} + +// NewUserConnection establishes a connection to the session bus and +// authenticates. This can be used to connect to systemd user instances. +// Callers should call Close() when done with the connection. +func NewUserConnection() (*Conn, error) { + return NewConnection(func() (*dbus.Conn, error) { + return dbusAuthHelloConnection(dbus.SessionBusPrivate) + }) +} + +// NewSystemdConnection establishes a private, direct connection to systemd. +// This can be used for communicating with systemd without a dbus daemon. +// Callers should call Close() when done with the connection. +func NewSystemdConnection() (*Conn, error) { + return NewConnection(func() (*dbus.Conn, error) { + // We skip Hello when talking directly to systemd. + return dbusAuthConnection(func(opts ...dbus.ConnOption) (*dbus.Conn, error) { + return dbus.Dial("unix:path=/run/systemd/private") + }) + }) +} + +// Close closes an established connection +func (c *Conn) Close() { + c.sysconn.Close() + c.sigconn.Close() +} + +// NewConnection establishes a connection to a bus using a caller-supplied function. +// This allows connecting to remote buses through a user-supplied mechanism. +// The supplied function may be called multiple times, and should return independent connections. +// The returned connection must be fully initialised: the org.freedesktop.DBus.Hello call must have succeeded, +// and any authentication should be handled by the function. 
+func NewConnection(dialBus func() (*dbus.Conn, error)) (*Conn, error) { + sysconn, err := dialBus() + if err != nil { + return nil, err + } + + sigconn, err := dialBus() + if err != nil { + sysconn.Close() + return nil, err + } + + c := &Conn{ + sysconn: sysconn, + sysobj: systemdObject(sysconn), + sigconn: sigconn, + sigobj: systemdObject(sigconn), + } + + c.subStateSubscriber.ignore = make(map[dbus.ObjectPath]int64) + c.jobListener.jobs = make(map[dbus.ObjectPath]chan<- string) + + // Setup the listeners on jobs so that we can get completions + c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0, + "type='signal', interface='org.freedesktop.systemd1.Manager', member='JobRemoved'") + + c.dispatch() + return c, nil +} + +// GetManagerProperty returns the value of a property on the org.freedesktop.systemd1.Manager +// interface. The value is returned in its string representation, as defined at +// https://developer.gnome.org/glib/unstable/gvariant-text.html +func (c *Conn) GetManagerProperty(prop string) (string, error) { + variant, err := c.sysobj.GetProperty("org.freedesktop.systemd1.Manager." + prop) + if err != nil { + return "", err + } + return variant.String(), nil +} + +func dbusAuthConnection(createBus func(opts ...dbus.ConnOption) (*dbus.Conn, error)) (*dbus.Conn, error) { + conn, err := createBus() + if err != nil { + return nil, err + } + + // Only use EXTERNAL method, and hardcode the uid (not username) + // to avoid a username lookup (which requires a dynamically linked + // libc) + methods := []dbus.Auth{dbus.AuthExternal(strconv.Itoa(os.Getuid()))} + + err = conn.Auth(methods) + if err != nil { + conn.Close() + return nil, err + } + + return conn, nil +} + +func dbusAuthHelloConnection(createBus func(opts ...dbus.ConnOption) (*dbus.Conn, error)) (*dbus.Conn, error) { + conn, err := dbusAuthConnection(createBus) + if err != nil { + return nil, err + } + + if err = conn.Hello(); err != nil { + conn.Close() + return nil, err + } + + return conn, nil +} + +func systemdObject(conn *dbus.Conn) dbus.BusObject { + return conn.Object("org.freedesktop.systemd1", dbus.ObjectPath("/org/freedesktop/systemd1")) +} diff --git a/src/cmd/linuxkit/vendor/github.com/coreos/go-systemd/v22/dbus/methods.go b/src/cmd/linuxkit/vendor/github.com/coreos/go-systemd/v22/dbus/methods.go new file mode 100644 index 000000000..e38659d7b --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/coreos/go-systemd/v22/dbus/methods.go @@ -0,0 +1,600 @@ +// Copyright 2015, 2018 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package dbus + +import ( + "errors" + "fmt" + "path" + "strconv" + + "github.com/godbus/dbus/v5" +) + +func (c *Conn) jobComplete(signal *dbus.Signal) { + var id uint32 + var job dbus.ObjectPath + var unit string + var result string + dbus.Store(signal.Body, &id, &job, &unit, &result) + c.jobListener.Lock() + out, ok := c.jobListener.jobs[job] + if ok { + out <- result + delete(c.jobListener.jobs, job) + } + c.jobListener.Unlock() +} + +func (c *Conn) startJob(ch chan<- string, job string, args ...interface{}) (int, error) { + if ch != nil { + c.jobListener.Lock() + defer c.jobListener.Unlock() + } + + var p dbus.ObjectPath + err := c.sysobj.Call(job, 0, args...).Store(&p) + if err != nil { + return 0, err + } + + if ch != nil { + c.jobListener.jobs[p] = ch + } + + // ignore error since 0 is fine if conversion fails + jobID, _ := strconv.Atoi(path.Base(string(p))) + + return jobID, nil +} + +// StartUnit enqueues a start job and depending jobs, if any (unless otherwise +// specified by the mode string). +// +// Takes the unit to activate, plus a mode string. The mode needs to be one of +// replace, fail, isolate, ignore-dependencies, ignore-requirements. If +// "replace" the call will start the unit and its dependencies, possibly +// replacing already queued jobs that conflict with this. If "fail" the call +// will start the unit and its dependencies, but will fail if this would change +// an already queued job. If "isolate" the call will start the unit in question +// and terminate all units that aren't dependencies of it. If +// "ignore-dependencies" it will start a unit but ignore all its dependencies. +// If "ignore-requirements" it will start a unit but only ignore the +// requirement dependencies. It is not recommended to make use of the latter +// two options. +// +// If the provided channel is non-nil, a result string will be sent to it upon +// job completion: one of done, canceled, timeout, failed, dependency, skipped. +// done indicates successful execution of a job. canceled indicates that a job +// has been canceled before it finished execution. timeout indicates that the +// job timeout was reached. failed indicates that the job failed. dependency +// indicates that a job this job depended on failed, and this job has hence +// been removed too. skipped indicates that a job was skipped because it +// didn't apply to the unit's current state. +// +// If no error occurs, the ID of the underlying systemd job will be returned. There +// does exist the possibility for no error to be returned, but for the returned job +// ID to be 0. In this case, the actual underlying ID is not 0 and this datapoint +// should not be considered authoritative. +// +// If an error does occur, it will be returned to the user alongside a job ID of 0. +func (c *Conn) StartUnit(name string, mode string, ch chan<- string) (int, error) { + return c.startJob(ch, "org.freedesktop.systemd1.Manager.StartUnit", name, mode) +} + +// StopUnit is similar to StartUnit but stops the specified unit rather +// than starting it. +func (c *Conn) StopUnit(name string, mode string, ch chan<- string) (int, error) { + return c.startJob(ch, "org.freedesktop.systemd1.Manager.StopUnit", name, mode) +} + +// ReloadUnit reloads a unit. Reloading is done only if the unit is already running; the call fails otherwise. +func (c *Conn) ReloadUnit(name string, mode string, ch chan<- string) (int, error) { + return c.startJob(ch, "org.freedesktop.systemd1.Manager.ReloadUnit", name, mode) +} + +// RestartUnit restarts a service.
If the restarted service isn't +// running, it will be started. +func (c *Conn) RestartUnit(name string, mode string, ch chan<- string) (int, error) { + return c.startJob(ch, "org.freedesktop.systemd1.Manager.RestartUnit", name, mode) +} + +// TryRestartUnit is like RestartUnit, except that a service that isn't running +// is not affected by the restart. +func (c *Conn) TryRestartUnit(name string, mode string, ch chan<- string) (int, error) { + return c.startJob(ch, "org.freedesktop.systemd1.Manager.TryRestartUnit", name, mode) +} + +// ReloadOrRestartUnit attempts a reload if the unit supports it and uses a restart +// otherwise. +func (c *Conn) ReloadOrRestartUnit(name string, mode string, ch chan<- string) (int, error) { + return c.startJob(ch, "org.freedesktop.systemd1.Manager.ReloadOrRestartUnit", name, mode) +} + +// ReloadOrTryRestartUnit attempts a reload if the unit supports it and uses a "Try" +// flavored restart otherwise. +func (c *Conn) ReloadOrTryRestartUnit(name string, mode string, ch chan<- string) (int, error) { + return c.startJob(ch, "org.freedesktop.systemd1.Manager.ReloadOrTryRestartUnit", name, mode) +} + +// StartTransientUnit() may be used to create and start a transient unit, which +// will be released as soon as it is not running or referenced anymore or the +// system is rebooted. name is the unit name including suffix, and must be +// unique. mode is the same as in StartUnit(), properties contains properties +// of the unit. +func (c *Conn) StartTransientUnit(name string, mode string, properties []Property, ch chan<- string) (int, error) { + return c.startJob(ch, "org.freedesktop.systemd1.Manager.StartTransientUnit", name, mode, properties, make([]PropertyCollection, 0)) +} + +// KillUnit takes the unit name and a UNIX signal number to send. All of the unit's +// processes are killed. +func (c *Conn) KillUnit(name string, signal int32) { + c.sysobj.Call("org.freedesktop.systemd1.Manager.KillUnit", 0, name, "all", signal).Store() +} + +// ResetFailedUnit resets the "failed" state of a specific unit. +func (c *Conn) ResetFailedUnit(name string) error { + return c.sysobj.Call("org.freedesktop.systemd1.Manager.ResetFailedUnit", 0, name).Store() +} + +// SystemState returns the systemd state. Equivalent to `systemctl is-system-running`. +func (c *Conn) SystemState() (*Property, error) { + var err error + var prop dbus.Variant + + obj := c.sysconn.Object("org.freedesktop.systemd1", "/org/freedesktop/systemd1") + err = obj.Call("org.freedesktop.DBus.Properties.Get", 0, "org.freedesktop.systemd1.Manager", "SystemState").Store(&prop) + if err != nil { + return nil, err + } + + return &Property{Name: "SystemState", Value: prop}, nil +} + +// getProperties takes the unit path and returns all of its dbus object properties, for the given dbus interface +func (c *Conn) getProperties(path dbus.ObjectPath, dbusInterface string) (map[string]interface{}, error) { + var err error + var props map[string]dbus.Variant + + if !path.IsValid() { + return nil, fmt.Errorf("invalid unit name: %v", path) + } + + obj := c.sysconn.Object("org.freedesktop.systemd1", path) + err = obj.Call("org.freedesktop.DBus.Properties.GetAll", 0, dbusInterface).Store(&props) + if err != nil { + return nil, err + } + + out := make(map[string]interface{}, len(props)) + for k, v := range props { + out[k] = v.Value() + } + + return out, nil +} + +// GetUnitProperties takes the (unescaped) unit name and returns all of its dbus object properties.
+func (c *Conn) GetUnitProperties(unit string) (map[string]interface{}, error) { + path := unitPath(unit) + return c.getProperties(path, "org.freedesktop.systemd1.Unit") +} + +// GetUnitPathProperties takes the (escaped) unit path and returns all of its dbus object properties. +func (c *Conn) GetUnitPathProperties(path dbus.ObjectPath) (map[string]interface{}, error) { + return c.getProperties(path, "org.freedesktop.systemd1.Unit") +} + +// GetAllProperties takes the (unescaped) unit name and returns all of its dbus object properties. +func (c *Conn) GetAllProperties(unit string) (map[string]interface{}, error) { + path := unitPath(unit) + return c.getProperties(path, "") +} + +func (c *Conn) getProperty(unit string, dbusInterface string, propertyName string) (*Property, error) { + var err error + var prop dbus.Variant + + path := unitPath(unit) + if !path.IsValid() { + return nil, errors.New("invalid unit name: " + unit) + } + + obj := c.sysconn.Object("org.freedesktop.systemd1", path) + err = obj.Call("org.freedesktop.DBus.Properties.Get", 0, dbusInterface, propertyName).Store(&prop) + if err != nil { + return nil, err + } + + return &Property{Name: propertyName, Value: prop}, nil +} + +func (c *Conn) GetUnitProperty(unit string, propertyName string) (*Property, error) { + return c.getProperty(unit, "org.freedesktop.systemd1.Unit", propertyName) +} + +// GetServiceProperty returns property for given service name and property name +func (c *Conn) GetServiceProperty(service string, propertyName string) (*Property, error) { + return c.getProperty(service, "org.freedesktop.systemd1.Service", propertyName) +} + +// GetUnitTypeProperties returns the extra properties for a unit, specific to the unit type. +// Valid values for unitType: Service, Socket, Target, Device, Mount, Automount, Snapshot, Timer, Swap, Path, Slice, Scope +// return "dbus.Error: Unknown interface" if the unitType is not the correct type of the unit +func (c *Conn) GetUnitTypeProperties(unit string, unitType string) (map[string]interface{}, error) { + path := unitPath(unit) + return c.getProperties(path, "org.freedesktop.systemd1."+unitType) +} + +// SetUnitProperties() may be used to modify certain unit properties at runtime. +// Not all properties may be changed at runtime, but many resource management +// settings (primarily those in systemd.cgroup(5)) may. The changes are applied +// instantly, and stored on disk for future boots, unless runtime is true, in which +// case the settings only apply until the next reboot. name is the name of the unit +// to modify. properties are the settings to set, encoded as an array of property +// name and value pairs. +func (c *Conn) SetUnitProperties(name string, runtime bool, properties ...Property) error { + return c.sysobj.Call("org.freedesktop.systemd1.Manager.SetUnitProperties", 0, name, runtime, properties).Store() +} + +func (c *Conn) GetUnitTypeProperty(unit string, unitType string, propertyName string) (*Property, error) { + return c.getProperty(unit, "org.freedesktop.systemd1."+unitType, propertyName) +} + +type UnitStatus struct { + Name string // The primary unit name as string + Description string // The human readable description string + LoadState string // The load state (i.e. whether the unit file has been loaded successfully) + ActiveState string // The active state (i.e. 
whether the unit is currently started or not) + SubState string // The sub state (a more fine-grained version of the active state that is specific to the unit type, which the active state is not) + Followed string // A unit that is being followed in its state by this unit, if there is any, otherwise the empty string. + Path dbus.ObjectPath // The unit object path + JobId uint32 // The numeric job id if there is a job queued for the unit, 0 otherwise + JobType string // The job type as string + JobPath dbus.ObjectPath // The job object path +} + +type storeFunc func(retvalues ...interface{}) error + +func (c *Conn) listUnitsInternal(f storeFunc) ([]UnitStatus, error) { + result := make([][]interface{}, 0) + err := f(&result) + if err != nil { + return nil, err + } + + resultInterface := make([]interface{}, len(result)) + for i := range result { + resultInterface[i] = result[i] + } + + status := make([]UnitStatus, len(result)) + statusInterface := make([]interface{}, len(status)) + for i := range status { + statusInterface[i] = &status[i] + } + + err = dbus.Store(resultInterface, statusInterface...) + if err != nil { + return nil, err + } + + return status, nil +} + +// ListUnits returns an array with all currently loaded units. Note that +// units may be known by multiple names at the same time, and hence there might +// be more unit names loaded than actual units behind them. +// Also note that a unit is only loaded if it is active and/or enabled. +// Units that are both disabled and inactive will thus not be returned. +func (c *Conn) ListUnits() ([]UnitStatus, error) { + return c.listUnitsInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnits", 0).Store) +} + +// ListUnitsFiltered returns an array with units filtered by state. +// It takes a list of units' statuses to filter. +func (c *Conn) ListUnitsFiltered(states []string) ([]UnitStatus, error) { + return c.listUnitsInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnitsFiltered", 0, states).Store) +} + +// ListUnitsByPatterns returns an array with units. +// It takes a list of units' statuses and names to filter. +// Note that units may be known by multiple names at the same time, +// and hence there might be more unit names loaded than actual units behind them. +func (c *Conn) ListUnitsByPatterns(states []string, patterns []string) ([]UnitStatus, error) { + return c.listUnitsInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnitsByPatterns", 0, states, patterns).Store) +} + +// ListUnitsByNames returns an array with units. It takes a list of units' +// names and returns an UnitStatus array. Compared to the ListUnitsByPatterns +// method, this method returns statuses even for inactive or non-existing +// units. The input array should contain exact unit names, not patterns.
+// Note: Requires systemd v230 or higher +func (c *Conn) ListUnitsByNames(units []string) ([]UnitStatus, error) { + return c.listUnitsInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnitsByNames", 0, units).Store) +} + +type UnitFile struct { + Path string + Type string +} + +func (c *Conn) listUnitFilesInternal(f storeFunc) ([]UnitFile, error) { + result := make([][]interface{}, 0) + err := f(&result) + if err != nil { + return nil, err + } + + resultInterface := make([]interface{}, len(result)) + for i := range result { + resultInterface[i] = result[i] + } + + files := make([]UnitFile, len(result)) + fileInterface := make([]interface{}, len(files)) + for i := range files { + fileInterface[i] = &files[i] + } + + err = dbus.Store(resultInterface, fileInterface...) + if err != nil { + return nil, err + } + + return files, nil +} + +// ListUnitFiles returns an array of all available units on disk. +func (c *Conn) ListUnitFiles() ([]UnitFile, error) { + return c.listUnitFilesInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnitFiles", 0).Store) +} + +// ListUnitFilesByPatterns returns an array of all available units on disk matching the patterns. +func (c *Conn) ListUnitFilesByPatterns(states []string, patterns []string) ([]UnitFile, error) { + return c.listUnitFilesInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnitFilesByPatterns", 0, states, patterns).Store) +} + +type LinkUnitFileChange EnableUnitFileChange + +// LinkUnitFiles() links unit files (that are located outside of the +// usual unit search paths) into the unit search path. +// +// It takes a list of absolute paths to unit files to link and two +// booleans. The first boolean controls whether the unit shall be +// enabled for runtime only (true, /run), or persistently (false, +// /etc). +// The second controls whether symlinks pointing to other units shall +// be replaced if necessary. +// +// This call returns a list of the changes made. The list consists of +// structures with three strings: the type of the change (one of symlink +// or unlink), the file name of the symlink and the destination of the +// symlink. +func (c *Conn) LinkUnitFiles(files []string, runtime bool, force bool) ([]LinkUnitFileChange, error) { + result := make([][]interface{}, 0) + err := c.sysobj.Call("org.freedesktop.systemd1.Manager.LinkUnitFiles", 0, files, runtime, force).Store(&result) + if err != nil { + return nil, err + } + + resultInterface := make([]interface{}, len(result)) + for i := range result { + resultInterface[i] = result[i] + } + + changes := make([]LinkUnitFileChange, len(result)) + changesInterface := make([]interface{}, len(changes)) + for i := range changes { + changesInterface[i] = &changes[i] + } + + err = dbus.Store(resultInterface, changesInterface...) + if err != nil { + return nil, err + } + + return changes, nil +} + +// EnableUnitFiles() may be used to enable one or more units in the system (by +// creating symlinks to them in /etc or /run). +// +// It takes a list of unit files to enable (either just file names or full +// absolute paths if the unit files are residing outside the usual unit +// search paths), and two booleans: the first controls whether the unit shall +// be enabled for runtime only (true, /run), or persistently (false, /etc). +// The second one controls whether symlinks pointing to other units shall +// be replaced if necessary. +// +// This call returns one boolean and an array with the changes made.
The +// boolean signals whether the unit files contained any enablement +// information (i.e. an [Install] section). The changes list consists of +// structures with three strings: the type of the change (one of symlink +// or unlink), the file name of the symlink and the destination of the +// symlink. +func (c *Conn) EnableUnitFiles(files []string, runtime bool, force bool) (bool, []EnableUnitFileChange, error) { + var carries_install_info bool + + result := make([][]interface{}, 0) + err := c.sysobj.Call("org.freedesktop.systemd1.Manager.EnableUnitFiles", 0, files, runtime, force).Store(&carries_install_info, &result) + if err != nil { + return false, nil, err + } + + resultInterface := make([]interface{}, len(result)) + for i := range result { + resultInterface[i] = result[i] + } + + changes := make([]EnableUnitFileChange, len(result)) + changesInterface := make([]interface{}, len(changes)) + for i := range changes { + changesInterface[i] = &changes[i] + } + + err = dbus.Store(resultInterface, changesInterface...) + if err != nil { + return false, nil, err + } + + return carries_install_info, changes, nil +} + +type EnableUnitFileChange struct { + Type string // Type of the change (one of symlink or unlink) + Filename string // File name of the symlink + Destination string // Destination of the symlink +} + +// DisableUnitFiles() may be used to disable one or more units in the system (by +// removing symlinks to them from /etc or /run). +// +// It takes a list of unit files to disable (either just file names or full +// absolute paths if the unit files are residing outside the usual unit +// search paths), and one boolean: whether the unit was enabled for runtime +// only (true, /run), or persistently (false, /etc). +// +// This call returns an array with the changes made. The changes list +// consists of structures with three strings: the type of the change (one of +// symlink or unlink), the file name of the symlink and the destination of the +// symlink. +func (c *Conn) DisableUnitFiles(files []string, runtime bool) ([]DisableUnitFileChange, error) { + result := make([][]interface{}, 0) + err := c.sysobj.Call("org.freedesktop.systemd1.Manager.DisableUnitFiles", 0, files, runtime).Store(&result) + if err != nil { + return nil, err + } + + resultInterface := make([]interface{}, len(result)) + for i := range result { + resultInterface[i] = result[i] + } + + changes := make([]DisableUnitFileChange, len(result)) + changesInterface := make([]interface{}, len(changes)) + for i := range changes { + changesInterface[i] = &changes[i] + } + + err = dbus.Store(resultInterface, changesInterface...) + if err != nil { + return nil, err + } + + return changes, nil +} + +type DisableUnitFileChange struct { + Type string // Type of the change (one of symlink or unlink) + Filename string // File name of the symlink + Destination string // Destination of the symlink +} + +// MaskUnitFiles masks one or more units in the system +// +// It takes three arguments: +// * list of units to mask (either just file names or full +// absolute paths if the unit files are residing outside +// the usual unit search paths) +// * runtime to specify whether the unit was enabled for runtime +// only (true, /run/systemd/..), or persistently (false, /etc/systemd/..)
+// * force flag +func (c *Conn) MaskUnitFiles(files []string, runtime bool, force bool) ([]MaskUnitFileChange, error) { + result := make([][]interface{}, 0) + err := c.sysobj.Call("org.freedesktop.systemd1.Manager.MaskUnitFiles", 0, files, runtime, force).Store(&result) + if err != nil { + return nil, err + } + + resultInterface := make([]interface{}, len(result)) + for i := range result { + resultInterface[i] = result[i] + } + + changes := make([]MaskUnitFileChange, len(result)) + changesInterface := make([]interface{}, len(changes)) + for i := range changes { + changesInterface[i] = &changes[i] + } + + err = dbus.Store(resultInterface, changesInterface...) + if err != nil { + return nil, err + } + + return changes, nil +} + +type MaskUnitFileChange struct { + Type string // Type of the change (one of symlink or unlink) + Filename string // File name of the symlink + Destination string // Destination of the symlink +} + +// UnmaskUnitFiles unmasks one or more units in the system +// +// It takes two arguments: +// * list of unit files to unmask (either just file names or full +// absolute paths if the unit files are residing outside +// the usual unit search paths) +// * runtime to specify whether the unit was enabled for runtime +// only (true, /run/systemd/..), or persistently (false, /etc/systemd/..) +func (c *Conn) UnmaskUnitFiles(files []string, runtime bool) ([]UnmaskUnitFileChange, error) { + result := make([][]interface{}, 0) + err := c.sysobj.Call("org.freedesktop.systemd1.Manager.UnmaskUnitFiles", 0, files, runtime).Store(&result) + if err != nil { + return nil, err + } + + resultInterface := make([]interface{}, len(result)) + for i := range result { + resultInterface[i] = result[i] + } + + changes := make([]UnmaskUnitFileChange, len(result)) + changesInterface := make([]interface{}, len(changes)) + for i := range changes { + changesInterface[i] = &changes[i] + } + + err = dbus.Store(resultInterface, changesInterface...) + if err != nil { + return nil, err + } + + return changes, nil +} + +type UnmaskUnitFileChange struct { + Type string // Type of the change (one of symlink or unlink) + Filename string // File name of the symlink + Destination string // Destination of the symlink +} + +// Reload instructs systemd to scan for and reload unit files. This is +// equivalent to a 'systemctl daemon-reload'. +func (c *Conn) Reload() error { + return c.sysobj.Call("org.freedesktop.systemd1.Manager.Reload", 0).Store() +} + +func unitPath(name string) dbus.ObjectPath { + return dbus.ObjectPath("/org/freedesktop/systemd1/unit/" + PathBusEscape(name)) +} + +// unitName returns the unescaped base element of the supplied escaped path +func unitName(dpath dbus.ObjectPath) string { + return pathBusUnescape(path.Base(string(dpath))) +} diff --git a/src/cmd/linuxkit/vendor/github.com/coreos/go-systemd/v22/dbus/properties.go b/src/cmd/linuxkit/vendor/github.com/coreos/go-systemd/v22/dbus/properties.go new file mode 100644 index 000000000..fb42b6273 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/coreos/go-systemd/v22/dbus/properties.go @@ -0,0 +1,237 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package dbus + +import ( + "github.com/godbus/dbus/v5" +) + +// From the systemd docs: +// +// The properties array of StartTransientUnit() may take many of the settings +// that may also be configured in unit files. Not all parameters are currently +// accepted though, but we plan to cover more properties with future release. +// Currently you may set the Description, Slice and all dependency types of +// units, as well as RemainAfterExit, ExecStart for service units, +// TimeoutStopUSec and PIDs for scope units, and CPUAccounting, CPUShares, +// BlockIOAccounting, BlockIOWeight, BlockIOReadBandwidth, +// BlockIOWriteBandwidth, BlockIODeviceWeight, MemoryAccounting, MemoryLimit, +// DevicePolicy, DeviceAllow for services/scopes/slices. These fields map +// directly to their counterparts in unit files and as normal D-Bus object +// properties. The exception here is the PIDs field of scope units which is +// used for construction of the scope only and specifies the initial PIDs to +// add to the scope object. + +type Property struct { + Name string + Value dbus.Variant +} + +type PropertyCollection struct { + Name string + Properties []Property +} + +type execStart struct { + Path string // the binary path to execute + Args []string // an array with all arguments to pass to the executed command, starting with argument 0 + UncleanIsFailure bool // a boolean whether it should be considered a failure if the process exits uncleanly +} + +// PropExecStart sets the ExecStart service property. The first argument is a +// slice with the binary path to execute followed by the arguments to pass to +// the executed command. See +// http://www.freedesktop.org/software/systemd/man/systemd.service.html#ExecStart= +func PropExecStart(command []string, uncleanIsFailure bool) Property { + execStarts := []execStart{ + { + Path: command[0], + Args: command, + UncleanIsFailure: uncleanIsFailure, + }, + } + + return Property{ + Name: "ExecStart", + Value: dbus.MakeVariant(execStarts), + } +} + +// PropRemainAfterExit sets the RemainAfterExit service property. See +// http://www.freedesktop.org/software/systemd/man/systemd.service.html#RemainAfterExit= +func PropRemainAfterExit(b bool) Property { + return Property{ + Name: "RemainAfterExit", + Value: dbus.MakeVariant(b), + } +} + +// PropType sets the Type service property. See +// http://www.freedesktop.org/software/systemd/man/systemd.service.html#Type= +func PropType(t string) Property { + return Property{ + Name: "Type", + Value: dbus.MakeVariant(t), + } +} + +// PropDescription sets the Description unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit#Description= +func PropDescription(desc string) Property { + return Property{ + Name: "Description", + Value: dbus.MakeVariant(desc), + } +} + +func propDependency(name string, units []string) Property { + return Property{ + Name: name, + Value: dbus.MakeVariant(units), + } +} + +// PropRequires sets the Requires unit property. 
See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Requires= +func PropRequires(units ...string) Property { + return propDependency("Requires", units) +} + +// PropRequiresOverridable sets the RequiresOverridable unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiresOverridable= +func PropRequiresOverridable(units ...string) Property { + return propDependency("RequiresOverridable", units) +} + +// PropRequisite sets the Requisite unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Requisite= +func PropRequisite(units ...string) Property { + return propDependency("Requisite", units) +} + +// PropRequisiteOverridable sets the RequisiteOverridable unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequisiteOverridable= +func PropRequisiteOverridable(units ...string) Property { + return propDependency("RequisiteOverridable", units) +} + +// PropWants sets the Wants unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Wants= +func PropWants(units ...string) Property { + return propDependency("Wants", units) +} + +// PropBindsTo sets the BindsTo unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#BindsTo= +func PropBindsTo(units ...string) Property { + return propDependency("BindsTo", units) +} + +// PropRequiredBy sets the RequiredBy unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiredBy= +func PropRequiredBy(units ...string) Property { + return propDependency("RequiredBy", units) +} + +// PropRequiredByOverridable sets the RequiredByOverridable unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiredByOverridable= +func PropRequiredByOverridable(units ...string) Property { + return propDependency("RequiredByOverridable", units) +} + +// PropWantedBy sets the WantedBy unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#WantedBy= +func PropWantedBy(units ...string) Property { + return propDependency("WantedBy", units) +} + +// PropBoundBy sets the BoundBy unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#BoundBy= +func PropBoundBy(units ...string) Property { + return propDependency("BoundBy", units) +} + +// PropConflicts sets the Conflicts unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Conflicts= +func PropConflicts(units ...string) Property { + return propDependency("Conflicts", units) +} + +// PropConflictedBy sets the ConflictedBy unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#ConflictedBy= +func PropConflictedBy(units ...string) Property { + return propDependency("ConflictedBy", units) +} + +// PropBefore sets the Before unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Before= +func PropBefore(units ...string) Property { + return propDependency("Before", units) +} + +// PropAfter sets the After unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#After= +func PropAfter(units ...string) Property { + return propDependency("After", units) +} + +// PropOnFailure sets the OnFailure unit property.
See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#OnFailure= +func PropOnFailure(units ...string) Property { + return propDependency("OnFailure", units) +} + +// PropTriggers sets the Triggers unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Triggers= +func PropTriggers(units ...string) Property { + return propDependency("Triggers", units) +} + +// PropTriggeredBy sets the TriggeredBy unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#TriggeredBy= +func PropTriggeredBy(units ...string) Property { + return propDependency("TriggeredBy", units) +} + +// PropPropagatesReloadTo sets the PropagatesReloadTo unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#PropagatesReloadTo= +func PropPropagatesReloadTo(units ...string) Property { + return propDependency("PropagatesReloadTo", units) +} + +// PropRequiresMountsFor sets the RequiresMountsFor unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiresMountsFor= +func PropRequiresMountsFor(units ...string) Property { + return propDependency("RequiresMountsFor", units) +} + +// PropSlice sets the Slice unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.resource-control.html#Slice= +func PropSlice(slice string) Property { + return Property{ + Name: "Slice", + Value: dbus.MakeVariant(slice), + } +} + +// PropPids sets the PIDs field of scope units used in the initial construction +// of the scope only and specifies the initial PIDs to add to the scope object. +// See https://www.freedesktop.org/wiki/Software/systemd/ControlGroupInterface/#properties +func PropPids(pids ...uint32) Property { + return Property{ + Name: "PIDs", + Value: dbus.MakeVariant(pids), + } +} diff --git a/src/cmd/linuxkit/vendor/github.com/coreos/go-systemd/v22/dbus/set.go b/src/cmd/linuxkit/vendor/github.com/coreos/go-systemd/v22/dbus/set.go new file mode 100644 index 000000000..17c5d4856 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/coreos/go-systemd/v22/dbus/set.go @@ -0,0 +1,47 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package dbus + +type set struct { + data map[string]bool +} + +func (s *set) Add(value string) { + s.data[value] = true +} + +func (s *set) Remove(value string) { + delete(s.data, value) +} + +func (s *set) Contains(value string) (exists bool) { + _, exists = s.data[value] + return +} + +func (s *set) Length() int { + return len(s.data) +} + +func (s *set) Values() (values []string) { + for val := range s.data { + values = append(values, val) + } + return +} + +func newSet() *set { + return &set{make(map[string]bool)} +} diff --git a/src/cmd/linuxkit/vendor/github.com/coreos/go-systemd/v22/dbus/subscription.go b/src/cmd/linuxkit/vendor/github.com/coreos/go-systemd/v22/dbus/subscription.go new file mode 100644 index 000000000..7e370fea2 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/coreos/go-systemd/v22/dbus/subscription.go @@ -0,0 +1,333 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package dbus + +import ( + "errors" + "log" + "time" + + "github.com/godbus/dbus/v5" +) + +const ( + cleanIgnoreInterval = int64(10 * time.Second) + ignoreInterval = int64(30 * time.Millisecond) +) + +// Subscribe sets up this connection to subscribe to all systemd dbus events. +// This is required before calling SubscribeUnits. When the connection closes +// systemd will automatically stop sending signals so there is no need to +// explicitly call Unsubscribe(). +func (c *Conn) Subscribe() error { + c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0, + "type='signal',interface='org.freedesktop.systemd1.Manager',member='UnitNew'") + c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0, + "type='signal',interface='org.freedesktop.DBus.Properties',member='PropertiesChanged'") + + return c.sigobj.Call("org.freedesktop.systemd1.Manager.Subscribe", 0).Store() +} + +// Unsubscribe this connection from systemd dbus events. 
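+// +// An illustrative usage sketch (assumes an established *Conn named conn; error handling elided): +// +//   if err := conn.Subscribe(); err != nil { +//     // handle subscription failure +//   } +//   defer conn.Unsubscribe()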
+func (c *Conn) Unsubscribe() error { + return c.sigobj.Call("org.freedesktop.systemd1.Manager.Unsubscribe", 0).Store() +} + +func (c *Conn) dispatch() { + ch := make(chan *dbus.Signal, signalBuffer) + + c.sigconn.Signal(ch) + + go func() { + for { + signal, ok := <-ch + if !ok { + return + } + + if signal.Name == "org.freedesktop.systemd1.Manager.JobRemoved" { + c.jobComplete(signal) + } + + if c.subStateSubscriber.updateCh == nil && + c.propertiesSubscriber.updateCh == nil { + continue + } + + var unitPath dbus.ObjectPath + switch signal.Name { + case "org.freedesktop.systemd1.Manager.JobRemoved": + unitName := signal.Body[2].(string) + c.sysobj.Call("org.freedesktop.systemd1.Manager.GetUnit", 0, unitName).Store(&unitPath) + case "org.freedesktop.systemd1.Manager.UnitNew": + unitPath = signal.Body[1].(dbus.ObjectPath) + case "org.freedesktop.DBus.Properties.PropertiesChanged": + if signal.Body[0].(string) == "org.freedesktop.systemd1.Unit" { + unitPath = signal.Path + + if len(signal.Body) >= 2 { + if changed, ok := signal.Body[1].(map[string]dbus.Variant); ok { + c.sendPropertiesUpdate(unitPath, changed) + } + } + } + } + + if unitPath == dbus.ObjectPath("") { + continue + } + + c.sendSubStateUpdate(unitPath) + } + }() +} + +// SubscribeUnits returns two unbuffered channels which will receive all changed units every +// interval. Deleted units are sent as nil. +func (c *Conn) SubscribeUnits(interval time.Duration) (<-chan map[string]*UnitStatus, <-chan error) { + return c.SubscribeUnitsCustom(interval, 0, func(u1, u2 *UnitStatus) bool { return *u1 != *u2 }, nil) +} + +// SubscribeUnitsCustom is like SubscribeUnits but lets you specify the buffer +// size of the channels, the comparison function for detecting changes and a filter +// function for cutting down on the noise that your channel receives. +func (c *Conn) SubscribeUnitsCustom(interval time.Duration, buffer int, isChanged func(*UnitStatus, *UnitStatus) bool, filterUnit func(string) bool) (<-chan map[string]*UnitStatus, <-chan error) { + old := make(map[string]*UnitStatus) + statusChan := make(chan map[string]*UnitStatus, buffer) + errChan := make(chan error, buffer) + + go func() { + for { + timerChan := time.After(interval) + + units, err := c.ListUnits() + if err == nil { + cur := make(map[string]*UnitStatus) + for i := range units { + if filterUnit != nil && filterUnit(units[i].Name) { + continue + } + cur[units[i].Name] = &units[i] + } + + // add all new or changed units + changed := make(map[string]*UnitStatus) + for n, u := range cur { + if oldU, ok := old[n]; !ok || isChanged(oldU, u) { + changed[n] = u + } + delete(old, n) + } + + // add all deleted units + for oldN := range old { + changed[oldN] = nil + } + + old = cur + + if len(changed) != 0 { + statusChan <- changed + } + } else { + errChan <- err + } + + <-timerChan + } + }() + + return statusChan, errChan +} + +type SubStateUpdate struct { + UnitName string + SubState string +} + +// SetSubStateSubscriber writes to updateCh when any unit's substate changes. +// Although this writes to updateCh on every state change, the reported state +// may be more recent than the change that generated it (due to an unavoidable +// race in the systemd dbus interface). That is, this method provides a good +// way to keep a current view of all units' states, but is not guaranteed to +// show every state transition they go through. Furthermore, state changes +// will only be written to the channel with non-blocking writes. 
If updateCh +// is full, it attempts to write an error to errCh; if errCh is full, the error +// passes silently. +func (c *Conn) SetSubStateSubscriber(updateCh chan<- *SubStateUpdate, errCh chan<- error) { + if c == nil { + msg := "nil receiver" + select { + case errCh <- errors.New(msg): + default: + log.Printf("full error channel while reporting: %s\n", msg) + } + return + } + + c.subStateSubscriber.Lock() + defer c.subStateSubscriber.Unlock() + c.subStateSubscriber.updateCh = updateCh + c.subStateSubscriber.errCh = errCh +} + +func (c *Conn) sendSubStateUpdate(unitPath dbus.ObjectPath) { + c.subStateSubscriber.Lock() + defer c.subStateSubscriber.Unlock() + + if c.subStateSubscriber.updateCh == nil { + return + } + + isIgnored := c.shouldIgnore(unitPath) + defer c.cleanIgnore() + if isIgnored { + return + } + + info, err := c.GetUnitPathProperties(unitPath) + if err != nil { + select { + case c.subStateSubscriber.errCh <- err: + default: + log.Printf("full error channel while reporting: %s\n", err) + } + return + } + defer c.updateIgnore(unitPath, info) + + name, ok := info["Id"].(string) + if !ok { + msg := "failed to cast info.Id" + select { + case c.subStateSubscriber.errCh <- errors.New(msg): + default: + log.Printf("full error channel while reporting: %s\n", msg) + } + return + } + substate, ok := info["SubState"].(string) + if !ok { + msg := "failed to cast info.SubState" + select { + case c.subStateSubscriber.errCh <- errors.New(msg): + default: + log.Printf("full error channel while reporting: %s\n", msg) + } + return + } + + update := &SubStateUpdate{name, substate} + select { + case c.subStateSubscriber.updateCh <- update: + default: + msg := "update channel is full" + select { + case c.subStateSubscriber.errCh <- errors.New(msg): + default: + log.Printf("full error channel while reporting: %s\n", msg) + } + return + } +} + +// The ignore functions work around a wart in the systemd dbus interface. +// Requesting the properties of an unloaded unit will cause systemd to send a +// pair of UnitNew/UnitRemoved signals. Because we need to get a unit's +// properties on UnitNew (as that's the only indication of a new unit coming up +// for the first time), we would enter an infinite loop if we did not attempt +// to detect and ignore these spurious signals. The signals themselves are +// indistinguishable from relevant ones, so we (somewhat hackishly) ignore an +// unloaded unit's signals for a short time after requesting its properties. +// This means that we will miss e.g. a transient unit being restarted +// *immediately* upon failure and also a transient unit being started +// immediately after requesting its status (with systemctl status, for example, +// because this causes a UnitNew signal to be sent which then causes us to fetch +// the properties).
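+// +// An illustrative consumer sketch for SetSubStateSubscriber (channel capacities are arbitrary; conn is an established *Conn on which Subscribe has been called): +// +//   updates := make(chan *SubStateUpdate, 256) +//   errs := make(chan error, 16) +//   conn.SetSubStateSubscriber(updates, errs) +//   go func() { +//     for u := range updates { +//       log.Printf("unit %s is now %s", u.UnitName, u.SubState) +//     } +//   }()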
+ +func (c *Conn) shouldIgnore(path dbus.ObjectPath) bool { + t, ok := c.subStateSubscriber.ignore[path] + return ok && t >= time.Now().UnixNano() +} + +func (c *Conn) updateIgnore(path dbus.ObjectPath, info map[string]interface{}) { + loadState, ok := info["LoadState"].(string) + if !ok { + return + } + + // unit is unloaded - it will trigger bad systemd dbus behavior + if loadState == "not-found" { + c.subStateSubscriber.ignore[path] = time.Now().UnixNano() + ignoreInterval + } +} + +// without this, ignore would grow unboundedly over time +func (c *Conn) cleanIgnore() { + now := time.Now().UnixNano() + if c.subStateSubscriber.cleanIgnore < now { + c.subStateSubscriber.cleanIgnore = now + cleanIgnoreInterval + + for p, t := range c.subStateSubscriber.ignore { + if t < now { + delete(c.subStateSubscriber.ignore, p) + } + } + } +} + +// PropertiesUpdate holds a map of a unit's changed properties +type PropertiesUpdate struct { + UnitName string + Changed map[string]dbus.Variant +} + +// SetPropertiesSubscriber writes to updateCh when any unit's properties +// change. Every property change reported by systemd will be sent; that is, no +// transitions will be "missed" (as they might be with SetSubStateSubscriber). +// However, state changes will only be written to the channel with non-blocking +// writes. If updateCh is full, it attempts to write an error to errCh; if +// errCh is full, the error passes silently. +func (c *Conn) SetPropertiesSubscriber(updateCh chan<- *PropertiesUpdate, errCh chan<- error) { + c.propertiesSubscriber.Lock() + defer c.propertiesSubscriber.Unlock() + c.propertiesSubscriber.updateCh = updateCh + c.propertiesSubscriber.errCh = errCh +} + +// we don't need to worry about shouldIgnore() here because +// sendPropertiesUpdate doesn't call GetProperties() +func (c *Conn) sendPropertiesUpdate(unitPath dbus.ObjectPath, changedProps map[string]dbus.Variant) { + c.propertiesSubscriber.Lock() + defer c.propertiesSubscriber.Unlock() + + if c.propertiesSubscriber.updateCh == nil { + return + } + + update := &PropertiesUpdate{unitName(unitPath), changedProps} + + select { + case c.propertiesSubscriber.updateCh <- update: + default: + msg := "update channel is full" + select { + case c.propertiesSubscriber.errCh <- errors.New(msg): + default: + log.Printf("full error channel while reporting: %s\n", msg) + } + return + } +} diff --git a/src/cmd/linuxkit/vendor/github.com/coreos/go-systemd/v22/dbus/subscription_set.go b/src/cmd/linuxkit/vendor/github.com/coreos/go-systemd/v22/dbus/subscription_set.go new file mode 100644 index 000000000..5b408d584 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/coreos/go-systemd/v22/dbus/subscription_set.go @@ -0,0 +1,57 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package dbus + +import ( + "time" +) + +// SubscriptionSet returns a subscription set which is like conn.Subscribe but +// can filter to only return events for a set of units. 
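+// +// An illustrative sketch ("foo.service" is a hypothetical unit name; conn is an established, subscribed *Conn): +// +//   s := conn.NewSubscriptionSet() +//   s.Add("foo.service") +//   statusCh, errCh := s.Subscribe()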
+type SubscriptionSet struct { + *set + conn *Conn +} + +func (s *SubscriptionSet) filter(unit string) bool { + return !s.Contains(unit) +} + +// Subscribe starts listening for dbus events for all of the units in the set. +// Returns channels identical to conn.SubscribeUnits. +func (s *SubscriptionSet) Subscribe() (<-chan map[string]*UnitStatus, <-chan error) { + // TODO: Make fully evented by using systemd 209 with properties changed values + return s.conn.SubscribeUnitsCustom(time.Second, 0, + mismatchUnitStatus, + func(unit string) bool { return s.filter(unit) }, + ) +} + +// NewSubscriptionSet returns a new subscription set. +func (conn *Conn) NewSubscriptionSet() *SubscriptionSet { + return &SubscriptionSet{newSet(), conn} +} + +// mismatchUnitStatus returns true if the provided UnitStatus objects +// are not equivalent. false is returned if the objects are equivalent. +// Only the Name, Description and state-related fields are used in +// the comparison. +func mismatchUnitStatus(u1, u2 *UnitStatus) bool { + return u1.Name != u2.Name || + u1.Description != u2.Description || + u1.LoadState != u2.LoadState || + u1.ActiveState != u2.ActiveState || + u1.SubState != u2.SubState +} diff --git a/src/cmd/linuxkit/vendor/github.com/coreos/go-systemd/v22/go.mod b/src/cmd/linuxkit/vendor/github.com/coreos/go-systemd/v22/go.mod new file mode 100644 index 000000000..6112fb0bd --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/coreos/go-systemd/v22/go.mod @@ -0,0 +1,5 @@ +module github.com/coreos/go-systemd/v22 + +go 1.12 + +require github.com/godbus/dbus/v5 v5.0.3 diff --git a/src/cmd/linuxkit/vendor/github.com/docker/cli/cli/command/cli.go b/src/cmd/linuxkit/vendor/github.com/docker/cli/cli/command/cli.go index b17aaf238..b6f42efc8 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/cli/cli/command/cli.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/cli/cli/command/cli.go @@ -8,17 +8,20 @@ import ( "os" "path/filepath" "runtime" + "strconv" "time" "github.com/docker/cli/cli" "github.com/docker/cli/cli/config" cliconfig "github.com/docker/cli/cli/config" "github.com/docker/cli/cli/config/configfile" + "github.com/docker/cli/cli/connhelper" cliflags "github.com/docker/cli/cli/flags" manifeststore "github.com/docker/cli/cli/manifest/store" registryclient "github.com/docker/cli/cli/registry/client" "github.com/docker/cli/cli/trust" dopts "github.com/docker/cli/opts" + clitypes "github.com/docker/cli/types" "github.com/docker/docker/api" "github.com/docker/docker/api/types" registrytypes "github.com/docker/docker/api/types/registry" @@ -53,19 +56,21 @@ type Cli interface { ManifestStore() manifeststore.Store RegistryClient(bool) registryclient.RegistryClient ContentTrustEnabled() bool + NewContainerizedEngineClient(sockPath string) (clitypes.ContainerizedClient, error) } // DockerCli is an instance the docker command line client. // Instances of the client can be returned from NewDockerCli. type DockerCli struct { - configFile *configfile.ConfigFile - in *InStream - out *OutStream - err io.Writer - client client.APIClient - serverInfo ServerInfo - clientInfo ClientInfo - contentTrust bool + configFile *configfile.ConfigFile + in *InStream + out *OutStream + err io.Writer + client client.APIClient + serverInfo ServerInfo + clientInfo ClientInfo + contentTrust bool + newContainerizeClient func(string) (clitypes.ContainerizedClient, error) } // DefaultVersion returns api.defaultVersion or DOCKER_API_VERSION if specified. 
@@ -129,6 +134,20 @@ func (cli *DockerCli) ContentTrustEnabled() bool { return cli.contentTrust } +// BuildKitEnabled returns whether buildkit is enabled either through a daemon setting +// or through the client-side DOCKER_BUILDKIT environment variable +func BuildKitEnabled(si ServerInfo) (bool, error) { + buildkitEnabled := si.BuildkitVersion == types.BuilderBuildKit + if buildkitEnv := os.Getenv("DOCKER_BUILDKIT"); buildkitEnv != "" { + var err error + buildkitEnabled, err = strconv.ParseBool(buildkitEnv) + if err != nil { + return false, errors.Wrap(err, "DOCKER_BUILDKIT environment variable expects boolean value") + } + } + return buildkitEnabled, nil +} + // ManifestStore returns a store for local manifests func (cli *DockerCli) ManifestStore() manifeststore.Store { // TODO: support override default location from config file @@ -205,6 +224,7 @@ func (cli *DockerCli) initializeFromClient() { cli.serverInfo = ServerInfo{ HasExperimental: ping.Experimental, OSType: ping.OSType, + BuildkitVersion: ping.BuilderVersion, } cli.client.NegotiateAPIVersionPing(ping) } @@ -228,11 +248,17 @@ func (cli *DockerCli) NotaryClient(imgRefAndAuth trust.ImageRefAndAuth, actions return trust.GetNotaryRepository(cli.In(), cli.Out(), UserAgent(), imgRefAndAuth.RepoInfo(), imgRefAndAuth.AuthConfig(), actions...) } +// NewContainerizedEngineClient returns a containerized engine client +func (cli *DockerCli) NewContainerizedEngineClient(sockPath string) (clitypes.ContainerizedClient, error) { + return cli.newContainerizeClient(sockPath) +} + // ServerInfo stores details about the supported features and platform of the // server type ServerInfo struct { HasExperimental bool OSType string + BuildkitVersion types.BuilderVersion } // ClientInfo stores details about the supported features of the client @@ -242,8 +268,8 @@ type ClientInfo struct { } // NewDockerCli returns a DockerCli instance with IO output and error streams set by in, out and err. -func NewDockerCli(in io.ReadCloser, out, err io.Writer, isTrusted bool) *DockerCli { - return &DockerCli{in: NewInStream(in), out: NewOutStream(out), err: err, contentTrust: isTrusted} +func NewDockerCli(in io.ReadCloser, out, err io.Writer, isTrusted bool, containerizedFn func(string) (clitypes.ContainerizedClient, error)) *DockerCli { + return &DockerCli{in: NewInStream(in), out: NewOutStream(out), err: err, contentTrust: isTrusted, newContainerizeClient: containerizedFn} } // NewAPIClientFromFlags creates a new APIClient from command line flags @@ -252,24 +278,43 @@ func NewAPIClientFromFlags(opts *cliflags.CommonOptions, configFile *configfile.
if err != nil { return &client.Client{}, err } + var clientOpts []client.Opt + helper, err := connhelper.GetConnectionHelper(host) + if err != nil { + return &client.Client{}, err + } + if helper == nil { + clientOpts = append(clientOpts, withHTTPClient(opts.TLSOptions)) + clientOpts = append(clientOpts, client.WithHost(host)) + } else { + clientOpts = append(clientOpts, func(c *client.Client) error { + httpClient := &http.Client{ + // No tls + // No proxy + Transport: &http.Transport{ + DialContext: helper.Dialer, + }, + } + return client.WithHTTPClient(httpClient)(c) + }) + clientOpts = append(clientOpts, client.WithHost(helper.Host)) + clientOpts = append(clientOpts, client.WithDialContext(helper.Dialer)) + } customHeaders := configFile.HTTPHeaders if customHeaders == nil { customHeaders = map[string]string{} } customHeaders["User-Agent"] = UserAgent() + clientOpts = append(clientOpts, client.WithHTTPHeaders(customHeaders)) verStr := api.DefaultVersion if tmpStr := os.Getenv("DOCKER_API_VERSION"); tmpStr != "" { verStr = tmpStr } + clientOpts = append(clientOpts, client.WithVersion(verStr)) - return client.NewClientWithOpts( - withHTTPClient(opts.TLSOptions), - client.WithHTTPHeaders(customHeaders), - client.WithVersion(verStr), - client.WithHost(host), - ) + return client.NewClientWithOpts(clientOpts...) } func getServerHost(hosts []string, tlsOptions *tlsconfig.Options) (string, error) { diff --git a/src/cmd/linuxkit/vendor/github.com/docker/cli/cli/command/registry.go b/src/cmd/linuxkit/vendor/github.com/docker/cli/cli/command/registry.go index 084d2b605..c12843693 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/cli/cli/command/registry.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/cli/cli/command/registry.go @@ -11,6 +11,7 @@ import ( "runtime" "strings" + "github.com/docker/cli/cli/debug" "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" registrytypes "github.com/docker/docker/api/types/registry" @@ -26,9 +27,10 @@ func ElectAuthServer(ctx context.Context, cli Cli) string { // example a Linux client might be interacting with a Windows daemon, hence // the default registry URL might be Windows specific. serverAddress := registry.IndexServer - if info, err := cli.Client().Info(ctx); err != nil { + if info, err := cli.Client().Info(ctx); err != nil && debug.IsEnabled() { + // Only report the warning if we're in debug mode to prevent nagging during engine initialization workflows fmt.Fprintf(cli.Err(), "Warning: failed to get default registry endpoint from daemon (%v). Using system default: %s\n", err, serverAddress) - } else if info.IndexServerAddress == "" { + } else if info.IndexServerAddress == "" && debug.IsEnabled() { fmt.Fprintf(cli.Err(), "Warning: Empty registry endpoint from daemon. Using system default: %s\n", serverAddress) } else { serverAddress = info.IndexServerAddress diff --git a/src/cmd/linuxkit/vendor/github.com/docker/cli/cli/command/utils.go b/src/cmd/linuxkit/vendor/github.com/docker/cli/cli/command/utils.go index dc543e7dc..13954c010 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/cli/cli/command/utils.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/cli/cli/command/utils.go @@ -102,14 +102,14 @@ func PruneFilters(dockerCli Cli, pruneFilters filters.Args) filters.Args { // CLI label filter supersede config.json. // If CLI label filter conflict with config.json, // skip adding label! filter in config.json. 
- if pruneFilters.Include("label!") && pruneFilters.ExactMatch("label!", parts[1]) { + if pruneFilters.Contains("label!") && pruneFilters.ExactMatch("label!", parts[1]) { continue } } else if parts[0] == "label!" { // CLI label! filter supersede config.json. // If CLI label! filter conflict with config.json, // skip adding label filter in config.json. - if pruneFilters.Include("label") && pruneFilters.ExactMatch("label", parts[1]) { + if pruneFilters.Contains("label") && pruneFilters.ExactMatch("label", parts[1]) { continue } } diff --git a/src/cmd/linuxkit/vendor/github.com/docker/cli/cli/config/config.go b/src/cmd/linuxkit/vendor/github.com/docker/cli/cli/config/config.go index 143436b9e..9161921a2 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/cli/cli/config/config.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/cli/cli/config/config.go @@ -96,12 +96,12 @@ func Load(configDir string) (*configfile.ConfigFile, error) { } file, err := os.Open(confFile) if err != nil { - return configFile, errors.Wrap(err, confFile) + return configFile, errors.Wrap(err, filename) } defer file.Close() err = configFile.LegacyLoadFromReader(file) if err != nil { - return configFile, errors.Wrap(err, confFile) + return configFile, errors.Wrap(err, filename) } return configFile, nil } diff --git a/src/cmd/linuxkit/vendor/github.com/docker/cli/cli/connhelper/connhelper.go b/src/cmd/linuxkit/vendor/github.com/docker/cli/cli/connhelper/connhelper.go new file mode 100644 index 000000000..95b24f576 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/cli/cli/connhelper/connhelper.go @@ -0,0 +1,302 @@ +// Package connhelper provides helpers for connecting to a remote daemon host with custom logic. +package connhelper + +import ( + "bytes" + "context" + "fmt" + "io" + "net" + "net/url" + "os" + "os/exec" + "runtime" + "strings" + "sync" + "syscall" + "time" + + "github.com/docker/cli/cli/connhelper/ssh" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// ConnectionHelper allows connecting to a remote host with a custom stream provider binary. +type ConnectionHelper struct { + Dialer func(ctx context.Context, network, addr string) (net.Conn, error) + Host string // dummy URL used for HTTP requests. e.g. "http://docker" +} + +// GetConnectionHelper returns a Docker-specific connection helper for the given URL. +// GetConnectionHelper returns nil without error when no helper is registered for the scheme. +// URL is like "ssh://me@server01". +func GetConnectionHelper(daemonURL string) (*ConnectionHelper, error) { + u, err := url.Parse(daemonURL) + if err != nil { + return nil, err + } + switch scheme := u.Scheme; scheme { + case "ssh": + sshCmd, sshArgs, err := ssh.New(daemonURL) + if err != nil { + return nil, err + } + return &ConnectionHelper{ + Dialer: func(ctx context.Context, network, addr string) (net.Conn, error) { + return newCommandConn(ctx, sshCmd, sshArgs...) + }, + Host: "http://docker", + }, nil + } + // Future versions may support plugins via ~/.docker/config.json. e.g. "dind" + // See docker/cli#889 for the previous discussion. + return nil, err +} + +func newCommandConn(ctx context.Context, cmd string, args ...string) (net.Conn, error) { + var ( + c commandConn + err error + ) + c.cmd = exec.CommandContext(ctx, cmd, args...)
+ // we assume that args never contains sensitive information + logrus.Debugf("connhelper: starting %s with %v", cmd, args) + c.cmd.Env = os.Environ() + setPdeathsig(c.cmd) + c.stdin, err = c.cmd.StdinPipe() + if err != nil { + return nil, err + } + c.stdout, err = c.cmd.StdoutPipe() + if err != nil { + return nil, err + } + c.cmd.Stderr = &stderrWriter{ + stderrMu: &c.stderrMu, + stderr: &c.stderr, + debugPrefix: fmt.Sprintf("connhelper (%s):", cmd), + } + c.localAddr = dummyAddr{network: "dummy", s: "dummy-0"} + c.remoteAddr = dummyAddr{network: "dummy", s: "dummy-1"} + return &c, c.cmd.Start() +} + +// commandConn implements net.Conn +type commandConn struct { + cmd *exec.Cmd + cmdExited bool + cmdWaitErr error + cmdMutex sync.Mutex + stdin io.WriteCloser + stdout io.ReadCloser + stderrMu sync.Mutex + stderr bytes.Buffer + stdioClosedMu sync.Mutex // for stdinClosed and stdoutClosed + stdinClosed bool + stdoutClosed bool + localAddr net.Addr + remoteAddr net.Addr +} + +// killIfStdioClosed kills the cmd if both stdin and stdout are closed. +func (c *commandConn) killIfStdioClosed() error { + c.stdioClosedMu.Lock() + stdioClosed := c.stdoutClosed && c.stdinClosed + c.stdioClosedMu.Unlock() + if !stdioClosed { + return nil + } + return c.kill() +} + +// killAndWait tries sending SIGTERM to the process before sending SIGKILL. +func killAndWait(cmd *exec.Cmd) error { + var werr error + if runtime.GOOS != "windows" { + werrCh := make(chan error) + go func() { werrCh <- cmd.Wait() }() + cmd.Process.Signal(syscall.SIGTERM) + select { + case werr = <-werrCh: + case <-time.After(3 * time.Second): + cmd.Process.Kill() + werr = <-werrCh + } + } else { + cmd.Process.Kill() + werr = cmd.Wait() + } + return werr +} + +// kill returns nil if the command terminated, regardless of the exit status.
+func (c *commandConn) kill() error { + var werr error + c.cmdMutex.Lock() + if c.cmdExited { + werr = c.cmdWaitErr + } else { + werr = killAndWait(c.cmd) + c.cmdWaitErr = werr + c.cmdExited = true + } + c.cmdMutex.Unlock() + if werr == nil { + return nil + } + wExitErr, ok := werr.(*exec.ExitError) + if ok { + if wExitErr.ProcessState.Exited() { + return nil + } + } + return errors.Wrapf(werr, "connhelper: failed to wait") +} + +func (c *commandConn) onEOF(eof error) error { + // when we got EOF, the command is going to be terminated + var werr error + c.cmdMutex.Lock() + if c.cmdExited { + werr = c.cmdWaitErr + } else { + werrCh := make(chan error) + go func() { werrCh <- c.cmd.Wait() }() + select { + case werr = <-werrCh: + c.cmdWaitErr = werr + c.cmdExited = true + case <-time.After(10 * time.Second): + c.cmdMutex.Unlock() + c.stderrMu.Lock() + stderr := c.stderr.String() + c.stderrMu.Unlock() + return errors.Errorf("command %v did not exit after %v: stderr=%q", c.cmd.Args, eof, stderr) + } + } + c.cmdMutex.Unlock() + if werr == nil { + return eof + } + c.stderrMu.Lock() + stderr := c.stderr.String() + c.stderrMu.Unlock() + return errors.Errorf("command %v has exited with %v, please make sure the URL is valid, and Docker 18.09 or later is installed on the remote host: stderr=%q", c.cmd.Args, werr, stderr) +} + +func ignorableCloseError(err error) bool { + errS := err.Error() + ss := []string{ + os.ErrClosed.Error(), + } + for _, s := range ss { + if strings.Contains(errS, s) { + return true + } + } + return false +} + +func (c *commandConn) CloseRead() error { + // NOTE: maybe already closed here + if err := c.stdout.Close(); err != nil && !ignorableCloseError(err) { + logrus.Warnf("commandConn.CloseRead: %v", err) + } + c.stdioClosedMu.Lock() + c.stdoutClosed = true + c.stdioClosedMu.Unlock() + if err := c.killIfStdioClosed(); err != nil { + logrus.Warnf("commandConn.CloseRead: %v", err) + } + return nil +} + +func (c *commandConn) Read(p []byte) (int, error) { + n, err := c.stdout.Read(p) + if err == io.EOF { + err = c.onEOF(err) + } + return n, err +} + +func (c *commandConn) CloseWrite() error { + // NOTE: maybe already closed here + if err := c.stdin.Close(); err != nil && !ignorableCloseError(err) { + logrus.Warnf("commandConn.CloseWrite: %v", err) + } + c.stdioClosedMu.Lock() + c.stdinClosed = true + c.stdioClosedMu.Unlock() + if err := c.killIfStdioClosed(); err != nil { + logrus.Warnf("commandConn.CloseWrite: %v", err) + } + return nil +} + +func (c *commandConn) Write(p []byte) (int, error) { + n, err := c.stdin.Write(p) + if err == io.EOF { + err = c.onEOF(err) + } + return n, err +} + +func (c *commandConn) Close() error { + var err error + if err = c.CloseRead(); err != nil { + logrus.Warnf("commandConn.Close: CloseRead: %v", err) + } + if err = c.CloseWrite(); err != nil { + logrus.Warnf("commandConn.Close: CloseWrite: %v", err) + } + return err +} + +func (c *commandConn) LocalAddr() net.Addr { + return c.localAddr +} +func (c *commandConn) RemoteAddr() net.Addr { + return c.remoteAddr +} +func (c *commandConn) SetDeadline(t time.Time) error { + logrus.Debugf("unimplemented call: SetDeadline(%v)", t) + return nil +} +func (c *commandConn) SetReadDeadline(t time.Time) error { + logrus.Debugf("unimplemented call: SetReadDeadline(%v)", t) + return nil +} +func (c *commandConn) SetWriteDeadline(t time.Time) error { + logrus.Debugf("unimplemented call: SetWriteDeadline(%v)", t) + return nil +} + +type dummyAddr struct { + network string + s string +} + +func (d dummyAddr) 
Network() string { + return d.network +} + +func (d dummyAddr) String() string { + return d.s +} + +type stderrWriter struct { + stderrMu *sync.Mutex + stderr *bytes.Buffer + debugPrefix string +} + +func (w *stderrWriter) Write(p []byte) (int, error) { + logrus.Debugf("%s%s", w.debugPrefix, string(p)) + w.stderrMu.Lock() + if w.stderr.Len() > 4096 { + w.stderr.Reset() + } + n, err := w.stderr.Write(p) + w.stderrMu.Unlock() + return n, err +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/cli/cli/connhelper/connhelper_linux.go b/src/cmd/linuxkit/vendor/github.com/docker/cli/cli/connhelper/connhelper_linux.go new file mode 100644 index 000000000..f138f5367 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/cli/cli/connhelper/connhelper_linux.go @@ -0,0 +1,12 @@ +package connhelper + +import ( + "os/exec" + "syscall" +) + +func setPdeathsig(cmd *exec.Cmd) { + cmd.SysProcAttr = &syscall.SysProcAttr{ + Pdeathsig: syscall.SIGKILL, + } +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/cli/cli/connhelper/connhelper_nolinux.go b/src/cmd/linuxkit/vendor/github.com/docker/cli/cli/connhelper/connhelper_nolinux.go new file mode 100644 index 000000000..c8350d9d7 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/cli/cli/connhelper/connhelper_nolinux.go @@ -0,0 +1,10 @@ +// +build !linux + +package connhelper + +import ( + "os/exec" +) + +func setPdeathsig(cmd *exec.Cmd) { +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/cli/cli/connhelper/ssh/ssh.go b/src/cmd/linuxkit/vendor/github.com/docker/cli/cli/connhelper/ssh/ssh.go new file mode 100644 index 000000000..0f2cfabc0 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/cli/cli/connhelper/ssh/ssh.go @@ -0,0 +1,70 @@ +// Package ssh provides the connection helper for ssh:// URL. +// Requires Docker 18.09 or later on the remote host. 
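+// +// As an illustration (user, host, and port are examples), a daemon URL of +// ssh://me@server01:2222 produces a helper command along the lines of: +// +//   ssh -l me -p 2222 server01 -- docker system dial-stdio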
+package ssh + +import ( + "net/url" + + "github.com/pkg/errors" +) + +// New returns cmd and its args +func New(daemonURL string) (string, []string, error) { + sp, err := parseSSHURL(daemonURL) + if err != nil { + return "", nil, err + } + return "ssh", append(sp.Args(), []string{"--", "docker", "system", "dial-stdio"}...), nil +} + +func parseSSHURL(daemonURL string) (*sshSpec, error) { + u, err := url.Parse(daemonURL) + if err != nil { + return nil, err + } + if u.Scheme != "ssh" { + return nil, errors.Errorf("expected scheme ssh, got %s", u.Scheme) + } + + var sp sshSpec + + if u.User != nil { + sp.user = u.User.Username() + if _, ok := u.User.Password(); ok { + return nil, errors.New("ssh helper does not accept plain-text password") + } + } + sp.host = u.Hostname() + if sp.host == "" { + return nil, errors.Errorf("host is not specified") + } + sp.port = u.Port() + if u.Path != "" { + return nil, errors.Errorf("extra path: %s", u.Path) + } + if u.RawQuery != "" { + return nil, errors.Errorf("extra query: %s", u.RawQuery) + } + if u.Fragment != "" { + return nil, errors.Errorf("extra fragment: %s", u.Fragment) + } + return &sp, err +} + +type sshSpec struct { + user string + host string + port string +} + +func (sp *sshSpec) Args() []string { + var args []string + if sp.user != "" { + args = append(args, "-l", sp.user) + } + if sp.port != "" { + args = append(args, "-p", sp.port) + } + args = append(args, sp.host) + return args +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/cli/cli/debug/debug.go b/src/cmd/linuxkit/vendor/github.com/docker/cli/cli/debug/debug.go new file mode 100644 index 000000000..b00ea63ad --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/cli/cli/debug/debug.go @@ -0,0 +1,26 @@ +package debug + +import ( + "os" + + "github.com/sirupsen/logrus" +) + +// Enable sets the DEBUG env var to true +// and sets the logger to log at debug level. +func Enable() { + os.Setenv("DEBUG", "1") + logrus.SetLevel(logrus.DebugLevel) +} + +// Disable sets the DEBUG env var to false +// and sets the logger to log at info level. +func Disable() { + os.Setenv("DEBUG", "") + logrus.SetLevel(logrus.InfoLevel) +} + +// IsEnabled checks whether the debug flag is set or not.
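+// +// An illustrative sketch of how the package is used together with logrus: +// +//   debug.Enable() +//   if debug.IsEnabled() { +//     logrus.Debug("debug logging is active") +//   }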
+func IsEnabled() bool { + return os.Getenv("DEBUG") != "" +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/cli/cli/flags/common.go b/src/cmd/linuxkit/vendor/github.com/docker/cli/cli/flags/common.go index 3834097c3..22faf12ca 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/cli/cli/flags/common.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/cli/cli/flags/common.go @@ -67,7 +67,8 @@ func (commonOpts *CommonOptions) InstallFlags(flags *pflag.FlagSet) { flags.Var(opts.NewQuotedString(&tlsOptions.CertFile), "tlscert", "Path to TLS certificate file") flags.Var(opts.NewQuotedString(&tlsOptions.KeyFile), "tlskey", "Path to TLS key file") - hostOpt := opts.NewNamedListOptsRef("hosts", &commonOpts.Hosts, opts.ValidateHost) + // opts.ValidateHost is not used here, so as to allow connection helpers + hostOpt := opts.NewNamedListOptsRef("hosts", &commonOpts.Hosts, nil) flags.VarP(hostOpt, "host", "H", "Daemon socket(s) to connect to") } diff --git a/src/cmd/linuxkit/vendor/github.com/docker/cli/cli/registry/client/client.go b/src/cmd/linuxkit/vendor/github.com/docker/cli/cli/registry/client/client.go index 35a110254..6fd18a897 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/cli/cli/registry/client/client.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/cli/cli/registry/client/client.go @@ -7,6 +7,7 @@ import ( "strings" manifesttypes "github.com/docker/cli/cli/manifest/types" + "github.com/docker/cli/cli/trust" "github.com/docker/distribution" "github.com/docker/distribution/reference" distributionclient "github.com/docker/distribution/registry/client" @@ -24,6 +25,7 @@ type RegistryClient interface { GetManifestList(ctx context.Context, ref reference.Named) ([]manifesttypes.ImageManifest, error) MountBlob(ctx context.Context, source reference.Canonical, target reference.Named) error PutManifest(ctx context.Context, ref reference.Named, manifest distribution.Manifest) (digest.Digest, error) + GetTags(ctx context.Context, ref reference.Named) ([]string, error) } // NewRegistryClient returns a new RegistryClient with a resolver @@ -122,6 +124,19 @@ func (c *client) PutManifest(ctx context.Context, ref reference.Named, manifest return dgst, errors.Wrapf(err, "failed to put manifest %s", ref) } +func (c *client) GetTags(ctx context.Context, ref reference.Named) ([]string, error) { + repoEndpoint, err := newDefaultRepositoryEndpoint(ref, c.insecureRegistry) + if err != nil { + return nil, err + } + + repo, err := c.getRepositoryForReference(ctx, ref, repoEndpoint) + if err != nil { + return nil, err + } + return repo.Tags(ctx).All(ctx) +} + func (c *client) getRepositoryForReference(ctx context.Context, ref reference.Named, repoEndpoint repositoryEndpoint) (distribution.Repository, error) { httpTransport, err := c.getHTTPTransportForRepoEndpoint(ctx, repoEndpoint) if err != nil { @@ -181,3 +196,16 @@ func getManifestOptionsFromReference(ref reference.Named) (digest.Digest, []dist } return "", nil, errors.Errorf("%s no tag or digest", ref) } + +// GetRegistryAuth returns the auth config given an input image +func GetRegistryAuth(ctx context.Context, resolver AuthConfigResolver, imageName string) (*types.AuthConfig, error) { + distributionRef, err := reference.ParseNormalizedNamed(imageName) + if err != nil { + return nil, fmt.Errorf("Failed to parse image name: %s: %s", imageName, err) + } + imgRefAndAuth, err := trust.GetImageReferencesAndAuth(ctx, nil, resolver, distributionRef.String()) + if err != nil { + return nil, fmt.Errorf("Failed to get imgRefAndAuth: %s", err) + } + 
return imgRefAndAuth.AuthConfig(), nil +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/cli/cli/registry/client/fetcher.go b/src/cmd/linuxkit/vendor/github.com/docker/cli/cli/registry/client/fetcher.go index 66c11ce22..e3d6cd606 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/cli/cli/registry/client/fetcher.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/cli/cli/registry/client/fetcher.go @@ -200,7 +200,7 @@ func continueOnError(err error) bool { } func (c *client) iterateEndpoints(ctx context.Context, namedRef reference.Named, each func(context.Context, distribution.Repository, reference.Named) (bool, error)) error { - endpoints, err := allEndpoints(namedRef) + endpoints, err := allEndpoints(namedRef, c.insecureRegistry) if err != nil { return err } @@ -231,7 +231,7 @@ func (c *client) iterateEndpoints(ctx context.Context, namedRef reference.Named, repoEndpoint := repositoryEndpoint{endpoint: endpoint, info: repoInfo} repo, err := c.getRepositoryForReference(ctx, namedRef, repoEndpoint) if err != nil { - logrus.Debugf("error with repo endpoint %s: %s", repoEndpoint, err) + logrus.Debugf("error %s with repo endpoint %+v", err, repoEndpoint) if _, ok := err.(ErrHTTPProto); ok { continue } @@ -262,12 +262,18 @@ func (c *client) iterateEndpoints(ctx context.Context, namedRef reference.Named, } // allEndpoints returns a list of endpoints ordered by priority (v2, https, v1). -func allEndpoints(namedRef reference.Named) ([]registry.APIEndpoint, error) { +func allEndpoints(namedRef reference.Named, insecure bool) ([]registry.APIEndpoint, error) { repoInfo, err := registry.ParseRepositoryInfo(namedRef) if err != nil { return nil, err } - registryService, err := registry.NewService(registry.ServiceOptions{}) + + var serviceOpts registry.ServiceOptions + if insecure { + logrus.Debugf("allowing insecure registry for: %s", reference.Domain(namedRef)) + serviceOpts.InsecureRegistries = []string{reference.Domain(namedRef)} + } + registryService, err := registry.NewService(serviceOpts) if err != nil { return []registry.APIEndpoint{}, err } diff --git a/src/cmd/linuxkit/vendor/github.com/docker/cli/opts/envfile.go b/src/cmd/linuxkit/vendor/github.com/docker/cli/opts/envfile.go index 10054c896..69d3ca6f6 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/cli/opts/envfile.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/cli/opts/envfile.go @@ -18,5 +18,5 @@ import ( // environment variables, that's why we just strip leading whitespace and // nothing more. 
func ParseEnvFile(filename string) ([]string, error) { - return parseKeyValueFile(filename, os.Getenv) + return parseKeyValueFile(filename, os.LookupEnv) } diff --git a/src/cmd/linuxkit/vendor/github.com/docker/cli/opts/file.go b/src/cmd/linuxkit/vendor/github.com/docker/cli/opts/file.go index 281905949..1721a46ef 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/cli/opts/file.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/cli/opts/file.go @@ -21,7 +21,7 @@ func (e ErrBadKey) Error() string { return fmt.Sprintf("poorly formatted environment: %s", e.msg) } -func parseKeyValueFile(filename string, emptyFn func(string) string) ([]string, error) { +func parseKeyValueFile(filename string, emptyFn func(string) (string, bool)) ([]string, error) { fh, err := os.Open(filename) if err != nil { return []string{}, err @@ -53,17 +53,23 @@ func parseKeyValueFile(filename string, emptyFn func(string) string) ([]string, if strings.ContainsAny(variable, whiteSpaces) { return []string{}, ErrBadKey{fmt.Sprintf("variable '%s' has white spaces", variable)} } + if len(variable) == 0 { + return []string{}, ErrBadKey{fmt.Sprintf("no variable name on line '%s'", line)} + } if len(data) > 1 { // pass the value through, no trimming lines = append(lines, fmt.Sprintf("%s=%s", variable, data[1])) } else { var value string + var present bool if emptyFn != nil { - value = emptyFn(line) + value, present = emptyFn(line) + } + if present { + // if only a pass-through variable is given, clean it up. + lines = append(lines, fmt.Sprintf("%s=%s", strings.TrimSpace(line), value)) } - // if only a pass-through variable is given, clean it up. - lines = append(lines, fmt.Sprintf("%s=%s", strings.TrimSpace(line), value)) } } } diff --git a/src/cmd/linuxkit/vendor/github.com/docker/cli/opts/hosts.go b/src/cmd/linuxkit/vendor/github.com/docker/cli/opts/hosts.go index 594cccf2f..408bc24a0 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/cli/opts/hosts.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/cli/opts/hosts.go @@ -77,6 +77,8 @@ func parseDockerDaemonHost(addr string) (string, error) { return parseSimpleProtoAddr("npipe", addrParts[1], DefaultNamedPipe) case "fd": return addr, nil + case "ssh": + return addr, nil default: return "", fmt.Errorf("Invalid bind address format: %s", addr) } diff --git a/src/cmd/linuxkit/vendor/github.com/docker/cli/opts/opts.go b/src/cmd/linuxkit/vendor/github.com/docker/cli/opts/opts.go index 51519e03b..6485e309e 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/cli/opts/opts.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/cli/opts/opts.go @@ -6,11 +6,11 @@ import ( "net" "path" "regexp" - "strconv" "strings" "github.com/docker/docker/api/types/filters" units "github.com/docker/go-units" + "github.com/pkg/errors" ) var ( @@ -307,6 +307,17 @@ func ValidateSysctl(val string) (string, error) { return "", fmt.Errorf("sysctl '%s' is not whitelisted", val) } +// ValidateProgressOutput errors out if an invalid value is passed to --progress +func ValidateProgressOutput(val string) error { + valid := []string{"auto", "plain", "tty"} + for _, s := range valid { + if s == val { + return nil + } + } + return fmt.Errorf("invalid value %q passed to --progress, valid values are: %s", val, strings.Join(valid, ", ")) +} + // FilterOpt is a flag type for validating filters type FilterOpt struct { filter filters.Args @@ -318,7 +329,7 @@ func NewFilterOpt() FilterOpt { } func (o *FilterOpt) String() string { - repr, err := filters.ToParam(o.filter) + repr, err := filters.ToJSON(o.filter) if err 
!= nil { return "invalid filters" } @@ -327,9 +338,18 @@ func (o *FilterOpt) String() string { // Set sets the value of the opt by parsing the command line value func (o *FilterOpt) Set(value string) error { - var err error - o.filter, err = filters.ParseFlag(value, o.filter) - return err + if value == "" { + return nil + } + if !strings.Contains(value, "=") { + return errors.New("bad format of filter (expected name=value)") + } + f := strings.SplitN(value, "=", 2) + name := strings.ToLower(strings.TrimSpace(f[0])) + value = strings.TrimSpace(f[1]) + + o.filter.Add(name, value) + return nil } // Type returns the option type @@ -487,38 +507,3 @@ func (m *MemSwapBytes) UnmarshalJSON(s []byte) error { b := MemBytes(*m) return b.UnmarshalJSON(s) } - -// NullableBool is a type for tri-state boolean options -type NullableBool struct { - b *bool -} - -// Type returns the type -func (n *NullableBool) Type() string { - return "" -} - -// Value returns the value in *bool -func (n *NullableBool) Value() *bool { - return n.b -} - -// Set sets the value. If value is empty string or "auto", nil is set. -// Otherwise true or false are set based on flag.Bool behavior. -func (n *NullableBool) Set(value string) error { - if value != "auto" && value != "" { - b, err := strconv.ParseBool(value) - if err != nil { - return err - } - n.b = &b - } - return nil -} - -func (n *NullableBool) String() string { - if n.b == nil { - return "auto" - } - return strconv.FormatBool(*n.b) -} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/cli/opts/parse.go b/src/cmd/linuxkit/vendor/github.com/docker/cli/opts/parse.go index 679759ded..70b60e142 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/cli/opts/parse.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/cli/opts/parse.go @@ -19,10 +19,10 @@ func ReadKVStrings(files []string, override []string) ([]string, error) { // present in the file with additional pairs specified in the override parameter. // If a key has no value, it will get the value from the environment. func ReadKVEnvStrings(files []string, override []string) ([]string, error) { - return readKVStrings(files, override, os.Getenv) + return readKVStrings(files, override, os.LookupEnv) } -func readKVStrings(files []string, override []string, emptyFn func(string) string) ([]string, error) { +func readKVStrings(files []string, override []string, emptyFn func(string) (string, bool)) ([]string, error) { variables := []string{} for _, ef := range files { parsedVars, err := parseKeyValueFile(ef, emptyFn) diff --git a/src/cmd/linuxkit/vendor/github.com/docker/cli/opts/port.go b/src/cmd/linuxkit/vendor/github.com/docker/cli/opts/port.go index 201aefafc..a4a91b1d5 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/cli/opts/port.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/cli/opts/port.go @@ -151,17 +151,22 @@ func ConvertPortToPortConfig( if binding.HostIP != "" && binding.HostIP != "0.0.0.0" { logrus.Warnf("ignoring IP-address (%s:%s:%s) service will listen on '0.0.0.0'", binding.HostIP, binding.HostPort, port) } - hostPort, err := strconv.ParseUint(binding.HostPort, 10, 16) + + startHostPort, endHostPort, err := nat.ParsePortRange(binding.HostPort) + if err != nil && binding.HostPort != "" { return nil, fmt.Errorf("invalid hostport binding (%s) for port (%s)", binding.HostPort, port.Port()) } - ports = append(ports, swarm.PortConfig{ - //TODO Name: ? 
- Protocol: swarm.PortConfigProtocol(strings.ToLower(port.Proto())), - TargetPort: uint32(port.Int()), - PublishedPort: uint32(hostPort), - PublishMode: swarm.PortConfigPublishModeIngress, - }) + + for i := startHostPort; i <= endHostPort; i++ { + ports = append(ports, swarm.PortConfig{ + //TODO Name: ? + Protocol: swarm.PortConfigProtocol(strings.ToLower(port.Proto())), + TargetPort: uint32(port.Int()), + PublishedPort: uint32(i), + PublishMode: swarm.PortConfigPublishModeIngress, + }) + } } return ports, nil } diff --git a/src/cmd/linuxkit/vendor/github.com/docker/cli/types/types.go b/src/cmd/linuxkit/vendor/github.com/docker/cli/types/types.go new file mode 100644 index 000000000..e98336539 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/cli/types/types.go @@ -0,0 +1,88 @@ +package types + +import ( + "context" + "io" + + "github.com/docker/docker/api/types" + ver "github.com/hashicorp/go-version" +) + +const ( + // CommunityEngineImage is the repo name for the community engine + CommunityEngineImage = "engine-community" + + // EnterpriseEngineImage is the repo name for the enterprise engine + EnterpriseEngineImage = "engine-enterprise" + + // RegistryPrefix is the default prefix used to pull engine images + RegistryPrefix = "docker.io/store/docker" + + // ReleaseNotePrefix is where to point users to for release notes + ReleaseNotePrefix = "https://docker.com/engine/releasenotes" + + // RuntimeMetadataName is the name of the runtime metadata file + // When stored as a label on the container it is prefixed by "com.docker." + RuntimeMetadataName = "distribution_based_engine" +) + +// ContainerizedClient can be used to manage the lifecycle of +// dockerd running as a container on containerd. +type ContainerizedClient interface { + Close() error + ActivateEngine(ctx context.Context, + opts EngineInitOptions, + out OutStream, + authConfig *types.AuthConfig) error + DoUpdate(ctx context.Context, + opts EngineInitOptions, + out OutStream, + authConfig *types.AuthConfig) error +} + +// EngineInitOptions contains the configuration settings +// used during initialization of a containerized docker engine +type EngineInitOptions struct { + RegistryPrefix string + EngineImage string + EngineVersion string + ConfigFile string + RuntimeMetadataDir string +} + +// AvailableVersions groups the available versions which were discovered +type AvailableVersions struct { + Downgrades []DockerVersion + Patches []DockerVersion + Upgrades []DockerVersion +} + +// DockerVersion wraps a semantic version to retain the original tag +// since the docker date based versions don't strictly follow semantic +// versioning (leading zeros, etc.) +type DockerVersion struct { + ver.Version + Tag string +} + +// Update stores available updates for rendering in a table +type Update struct { + Type string + Version string + Notes string +} + +// OutStream is an output stream used to write normal program output.
+type OutStream interface { + io.Writer + FD() uintptr + IsTerminal() bool +} + +// RuntimeMetadata holds platform information about the daemon +type RuntimeMetadata struct { + Platform string `json:"platform"` + ContainerdMinVersion string `json:"containerd_min_version"` + Runtime string `json:"runtime"` + EngineImage string `json:"engine_image"` +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/cli/vendor.conf b/src/cmd/linuxkit/vendor/github.com/docker/cli/vendor.conf index fc678e334..66153774a 100755 --- a/src/cmd/linuxkit/vendor/github.com/docker/cli/vendor.conf +++ b/src/cmd/linuxkit/vendor/github.com/docker/cli/vendor.conf @@ -1,96 +1,99 @@ -github.com/agl/ed25519 d2b94fd789ea21d12fac1a4443dd3a3f79cda72c -github.com/Azure/go-ansiterm d6e3b3328b783f23731bc4d058875b0371ff8109 -github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9 -github.com/containerd/containerd 08f7ee9828af1783dc98cc5cc1739e915697c667 -github.com/containerd/continuity d3c23511c1bf5851696cba83143d9cbcd666869b -github.com/coreos/etcd v3.2.1 -github.com/cpuguy83/go-md2man v1.0.8 -github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76 -github.com/docker/distribution 83389a148052d74ac602f5f1d62f86ff2f3c4aa5 -github.com/docker/docker 371b590ace0d4a329cd6a3328d31d33c4f77a780 https://github.com/docker/engine -github.com/docker/docker-credential-helpers 5241b46610f2491efdf9d1c85f1ddf5b02f6d962 -# the docker/go package contains a customized version of canonical/json -# and is used by Notary. The package is periodically rebased on current Go versions. -github.com/docker/go d30aec9fd63c35133f8f79c3412ad91a3b08be06 -github.com/docker/go-connections 7beb39f0b969b075d1325fecb092faf27fd357b6 -github.com/docker/go-events 9461782956ad83b30282bf90e31fa6a70c255ba9 -github.com/docker/go-metrics d466d4f6fd960e01820085bd7e1a24426ee7ef18 -github.com/docker/go-units 9e638d38cf6977a37a8ea0078f3ee75a7cdb2dd1 -github.com/docker/swarmkit 199cf49cd99690135d99e52a1907ec82e8113c4f -github.com/emicklei/go-restful ff4f55a206334ef123e4f79bbf348980da81ca46 -github.com/emicklei/go-restful-swagger12 dcef7f55730566d41eae5db10e7d6981829720f6 -github.com/flynn-archive/go-shlex 3f9db97f856818214da2e1057f8ad84803971cff -github.com/ghodss/yaml 0ca9ea5df5451ffdf184b4428c902747c2c11cd7 -github.com/gogo/protobuf v1.0.0 -github.com/google/go-cmp v0.2.0 -github.com/golang/glog 44145f04b68cf362d9c4df2182967c2275eaefed -github.com/golang/protobuf v1.1.0 -github.com/google/btree 316fb6d3f031ae8f4d457c6c5186b9e3ded70435 -github.com/google/gofuzz 44d81051d367757e1c7c6a5a86423ece9afcf63c -github.com/googleapis/gnostic e4f56557df6250e1945ee6854f181ce4e1c2c646 -github.com/gorilla/context v1.1 -github.com/gorilla/mux v1.1 -gotest.tools v2.1.0 -github.com/go-openapi/jsonpointer 46af16f9f7b149af66e5d1bd010e3574dc06de98 -github.com/go-openapi/jsonreference 13c6e3589ad90f49bd3e3bbe2c2cb3d7a4142272 -github.com/go-openapi/spec 6aced65f8501fe1217321abf0749d354824ba2ff -github.com/go-openapi/swag 1d0bd113de87027671077d3c71eb3ac5d7dbba72 -github.com/gregjones/httpcache c1f8028e62adb3d518b823a2f8e6a95c38bdd3aa -github.com/grpc-ecosystem/grpc-gateway 1a03ca3bad1e1ebadaedd3abb76bc58d4ac8143b -github.com/grpc-ecosystem/grpc-opentracing 8e809c8a86450a29b90dcc9efbf062d0fe6d9746 -github.com/hashicorp/golang-lru a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4 -github.com/howeyc/gopass 3ca23474a7c7203e0a0a070fd33508f6efdb9b3d -github.com/imdario/mergo v0.3.5 -github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75 -github.com/juju/ratelimit 
5b9ff866471762aa2ab2dced63c9fb6f53921342 -github.com/json-iterator/go 6240e1e7983a85228f7fd9c3e1b6932d46ec58e2 -github.com/mailru/easyjson d5b7844b561a7bc640052f1b935f7b800330d7e0 -github.com/mattn/go-shellwords v1.0.3 -github.com/matttproud/golang_protobuf_extensions v1.0.0 -github.com/Microsoft/go-winio v0.4.8 -github.com/miekg/pkcs11 5f6e0d0dad6f472df908c8e968a98ef00c9224bb -github.com/mitchellh/mapstructure f3009df150dadf309fdee4a54ed65c124afad715 -github.com/moby/buildkit 9acf51e49185b348608e0096b2903dd72907adcb -github.com/morikuni/aec 39771216ff4c63d11f5e604076f9c45e8be1067b -github.com/Nvveen/Gotty a8b993ba6abdb0e0c12b0125c603323a71c7790c https://github.com/ijc25/Gotty -github.com/opencontainers/go-digest v1.0.0-rc1 -github.com/opencontainers/image-spec v1.0.1 -github.com/opencontainers/runc ad0f5255060d36872be04de22f8731f38ef2d7b1 -github.com/opentracing/opentracing-go 1361b9cd60be79c4c3a7fa9841b3c132e40066a7 -github.com/peterbourgon/diskv 5f041e8faa004a95c88a202771f4cc3e991971e6 -github.com/pkg/errors 839d9e913e063e28dfd0e6c7b7512793e0a48be9 -github.com/prometheus/client_golang 52437c81da6b127a9925d17eb3a382a2e5fd395e -github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6 -github.com/prometheus/common ebdfc6da46522d58825777cf1f90490a5b1ef1d8 -github.com/prometheus/procfs abf152e5f3e97f2fafac028d2cc06c1feb87ffa5 -github.com/PuerkitoBio/purell 8a290539e2e8629dbc4e6bad948158f790ec31f4 -github.com/PuerkitoBio/urlesc 5bd2802263f21d8788851d5305584c82a5c75d7e -github.com/russross/blackfriday 1d6b8e9301e720b08a8938b8c25c018285885438 -github.com/shurcooL/sanitized_anchor_name 10ef21a441db47d8b13ebcc5fd2310f636973c77 -github.com/sirupsen/logrus v1.0.3 -github.com/spf13/cobra v0.0.3 -github.com/spf13/pflag v1.0.1 -github.com/theupdateframework/notary v0.6.1 -github.com/tonistiigi/fsutil 8abad97ee3969cdf5e9c367f46adba2c212b3ddb -github.com/xeipuuv/gojsonpointer e0fe6f68307607d540ed8eac07a342c33fa1b54a -github.com/xeipuuv/gojsonreference e02fc20de94c78484cd5ffb007f8af96be030a45 -github.com/xeipuuv/gojsonschema 93e72a773fade158921402d6a24c819b48aba29d -golang.org/x/crypto 1a580b3eff7814fc9b40602fd35256c63b50f491 -golang.org/x/net 0ed95abb35c445290478a5348a7b38bb154135fd -golang.org/x/sync fd80eb99c8f653c847d294a001bdf2a3a6f768f5 -golang.org/x/sys 37707fdb30a5b38865cfb95e5aab41707daec7fd -golang.org/x/text f72d8390a633d5dfb0cc84043294db9f6c935756 -golang.org/x/time a4bde12657593d5e90d0533a3e4fd95e635124cb -google.golang.org/genproto 694d95ba50e67b2e363f3483057db5d4910c18f9 -google.golang.org/grpc v1.12.0 -gopkg.in/inf.v0 3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4 -gopkg.in/yaml.v2 4c78c975fe7c825c6d1466c42be594d1d6f3aba6 -k8s.io/api kubernetes-1.8.14 -k8s.io/apimachinery kubernetes-1.8.14 -k8s.io/client-go kubernetes-1.8.14 -k8s.io/kubernetes v1.8.14 -k8s.io/kube-openapi 0c329704159e3b051aafac400b15baacf2a94a04 -vbom.ml/util 928aaa586d7718c70f4090ddf83f2b34c16fdc8d -github.com/containerd/console 5d1b48d6114b8c9666f0c8b916f871af97b0a761 -github.com/tonistiigi/units 29de085e9400559bd68aea2e7bc21566e7b8281d -github.com/google/shlex 6f45313302b9c56850fc17f99e40caebce98c716 +github.com/agl/ed25519 5312a61534124124185d41f09206b9fef1d88403 +github.com/asaskevich/govalidator f9ffefc3facfbe0caee3fea233cbb6e8208f4541 +github.com/Azure/go-ansiterm d6e3b3328b783f23731bc4d058875b0371ff8109 +github.com/beorn7/perks 3a771d992973f24aa725d07868b467d1ddfceafb +github.com/containerd/console c12b1e7919c14469339a5d38f2f8ed9b64a9de23 +github.com/containerd/containerd 
bb0f83ab6eec47c3316bb763d5c20a82c7750c31 +github.com/containerd/continuity d8fb8589b0e8e85b8c8bbaa8840226d0dfeb7371 +github.com/containerd/fifo 3d5202aec260678c48179c56f40e6f38a095738c +github.com/containerd/typeurl f6943554a7e7e88b3c14aad190bf05932da84788 +github.com/coreos/etcd fca8add78a9d926166eb739b8e4a124434025ba3 # v3.3.9 +github.com/cpuguy83/go-md2man 20f5889cbdc3c73dbd2862796665e7c465ade7d1 # v1.0.8 +github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76 # v1.1.0 +github.com/dgrijalva/jwt-go a2c85815a77d0f951e33ba4db5ae93629a1530af +github.com/docker/distribution 83389a148052d74ac602f5f1d62f86ff2f3c4aa5 +github.com/docker/docker 200b524eff60a9c95a22bc2518042ac2ff617d07 https://github.com/docker/engine # 18.09 branch +github.com/docker/docker-credential-helpers 54f0238b6bf101fc3ad3b34114cb5520beb562f5 # v0.6.3 +github.com/docker/go d30aec9fd63c35133f8f79c3412ad91a3b08be06 # Contains a customized version of canonical/json and is used by Notary. The package is periodically rebased on current Go versions. +github.com/docker/go-connections 7395e3f8aa162843a74ed6d48e79627d9792ac55 # v0.4.0 +github.com/docker/go-events 9461782956ad83b30282bf90e31fa6a70c255ba9 +github.com/docker/go-metrics d466d4f6fd960e01820085bd7e1a24426ee7ef18 +github.com/docker/go-units 47565b4f722fb6ceae66b95f853feed578a4a51c # v0.3.3 +github.com/docker/libtrust 9cbd2a1374f46905c68a4eb3694a130610adc62a +github.com/docker/licensing 9781369abdb5281cdc07a2a446c6df01347ec793 +github.com/docker/swarmkit cfa742c8abe6f8e922f6e4e920153c408e7d9c3b +github.com/flynn-archive/go-shlex 3f9db97f856818214da2e1057f8ad84803971cff +github.com/ghodss/yaml 0ca9ea5df5451ffdf184b4428c902747c2c11cd7 # v1.0.0 +github.com/gogo/googleapis b23578765ee54ff6bceff57f397d833bf4ca6869 +github.com/gogo/protobuf 636bf0302bc95575d69441b25a2603156ffdddf1 # v1.1.1 +github.com/golang/glog 23def4e6c14b4da8ac2ed8007337bc5eb5007998 +github.com/golang/protobuf b4deda0973fb4c70b50d226b1af49f3da59f5265 # v1.1.0 +github.com/google/btree e89373fe6b4a7413d7acd6da1725b83ef713e6e4 +github.com/google/go-cmp 3af367b6b30c263d47e8895973edcca9a49cf029 # v0.2.0 +github.com/google/gofuzz 24818f796faf91cd76ec7bddd72458fbced7a6c1 +github.com/google/shlex 6f45313302b9c56850fc17f99e40caebce98c716 +github.com/google/uuid 0cd6bf5da1e1c83f8b45653022c74f71af0538a4 # v1.1.1 +github.com/googleapis/gnostic 7c663266750e7d82587642f65e60bc4083f1f84e # v0.2.0 +github.com/gorilla/context 08b5f424b9271eedf6f9f0ce86cb9396ed337a42 # v1.1.1 +github.com/gorilla/mux e3702bed27f0d39777b0b37b664b6280e8ef8fbf # v1.6.2 +github.com/gregjones/httpcache 9cad4c3443a7200dd6400aef47183728de563a38 +github.com/grpc-ecosystem/grpc-gateway 1a03ca3bad1e1ebadaedd3abb76bc58d4ac8143b +github.com/grpc-ecosystem/grpc-opentracing 8e809c8a86450a29b90dcc9efbf062d0fe6d9746 +github.com/hashicorp/go-version 23480c0665776210b5fbbac6eaaee40e3e6a96b7 +github.com/hashicorp/golang-lru 0fb14efe8c47ae851c0034ed7a448854d3d34cf3 +github.com/imdario/mergo 9f23e2d6bd2a77f959b2bf6acdbefd708a83a4a4 # v0.3.6 +github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75 # v1.0.0 +github.com/json-iterator/go ab8a2e0c74be9d3be70b3184d9acc634935ded82 # 1.1.4 +github.com/mattn/go-shellwords 02e3cf038dcea8290e44424da473dd12be796a8a # v1.0.3 +github.com/matttproud/golang_protobuf_extensions c12348ce28de40eed0136aa2b644d0ee0650e56c # v1.0.1 +github.com/Microsoft/go-winio 78a084671df137c2acfcacaa730d7e7dc285ac39 # v0.4.10 +github.com/Microsoft/hcsshim 44c060121b68e8bdc40b411beba551f3b4ee9e55 
+github.com/miekg/pkcs11 6120d95c0e9576ccf4a78ba40855809dca31a9ed +github.com/mitchellh/mapstructure f15292f7a699fcc1a38a80977f80a046874ba8ac +github.com/moby/buildkit 05766c5c21a1e528eeb1c3522b2f05493fe9ac47 +github.com/modern-go/concurrent bacd9c7ef1dd9b15be4a9909b8ac7a4e313eec94 # 1.0.3 +github.com/modern-go/reflect2 4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd # 1.0.1 +github.com/morikuni/aec 39771216ff4c63d11f5e604076f9c45e8be1067b +github.com/Nvveen/Gotty a8b993ba6abdb0e0c12b0125c603323a71c7790c https://github.com/ijc25/Gotty +github.com/opencontainers/go-digest 279bed98673dd5bef374d3b6e4b09e2af76183bf # v1.0.0-rc1 +github.com/opencontainers/image-spec d60099175f88c47cd379c4738d158884749ed235 # v1.0.1 +github.com/opencontainers/runc 20aff4f0488c6d4b8df4d85b4f63f1f704c11abd +github.com/opencontainers/runtime-spec 4e3b9264a330d094b0386c3703c5f379119711e8 # v1.0.1 +github.com/opentracing/opentracing-go 1361b9cd60be79c4c3a7fa9841b3c132e40066a7 +github.com/peterbourgon/diskv 5f041e8faa004a95c88a202771f4cc3e991971e6 # v2.0.1 +github.com/pkg/errors 839d9e913e063e28dfd0e6c7b7512793e0a48be9 +github.com/prometheus/client_golang 52437c81da6b127a9925d17eb3a382a2e5fd395e +github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6 +github.com/prometheus/common ebdfc6da46522d58825777cf1f90490a5b1ef1d8 +github.com/prometheus/procfs abf152e5f3e97f2fafac028d2cc06c1feb87ffa5 +github.com/russross/blackfriday 1d6b8e9301e720b08a8938b8c25c018285885438 +github.com/shurcooL/sanitized_anchor_name 10ef21a441db47d8b13ebcc5fd2310f636973c77 +github.com/sirupsen/logrus 3e01752db0189b9157070a0e1668a620f9a85da2 # v1.0.6 +github.com/spf13/cobra ef82de70bb3f60c65fb8eebacbb2d122ef517385 # v0.0.3 +github.com/spf13/pflag 4cb166e4f25ac4e8016a3595bbf7ea2e9aa85a2c https://github.com/thaJeztah/pflag.git # temporary fork with https://github.com/spf13/pflag/pull/170 applied, which isn't merged yet upstream +github.com/syndtr/gocapability 2c00daeb6c3b45114c80ac44119e7b8801fdd852 +github.com/theupdateframework/notary d6e1431feb32348e0650bf7551ac5cffd01d857b # v0.6.1 +github.com/tonistiigi/fsutil 2862f6bc5ac9b97124e552a5c108230b38a1b0ca +github.com/tonistiigi/units 6950e57a87eaf136bbe44ef2ec8e75b9e3569de2 +github.com/xeipuuv/gojsonpointer 4e3ac2762d5f479393488629ee9370b50873b3a6 +github.com/xeipuuv/gojsonreference bd5ef7bd5415a7ac448318e64f11a24cd21e594b +github.com/xeipuuv/gojsonschema 93e72a773fade158921402d6a24c819b48aba29d +golang.org/x/crypto 0709b304e793a5edb4a2c0145f281ecdc20838a4 +golang.org/x/net a680a1efc54dd51c040b3b5ce4939ea3cf2ea0d1 +golang.org/x/sync 1d60e4601c6fd243af51cc01ddf169918a5407ca +golang.org/x/sys 1b2967e3c290b7c545b3db0deeda16e9be4f98a2 +golang.org/x/text f21a4dfb5e38f5895301dc265a8def02365cc3d0 # v0.3.0 +golang.org/x/time fbb02b2291d28baffd63558aa44b4b56f178d650 +google.golang.org/genproto 02b4e95473316948020af0b7a4f0f22c73929b0e +google.golang.org/grpc 41344da2231b913fa3d983840a57a6b1b7b631a1 # v1.12.0 +gopkg.in/inf.v0 d2d2541c53f18d2a059457998ce2876cc8e67cbf # v0.9.1 +gopkg.in/yaml.v2 5420a8b6744d3b0345ab293f6fcba19c978f1183 # v2.2.1 +gotest.tools 1083505acf35a0bd8a696b26837e1fb3187a7a83 # v2.3.0 +k8s.io/api 2d6f90ab1293a1fb871cf149423ebb72aa7423aa # kubernetes-1.11.2 +k8s.io/apimachinery 103fd098999dc9c0c88536f5c9ad2e5da39373ae # kubernetes-1.11.2 +k8s.io/client-go 1f13a808da65775f22cbf47862c4e5898d8f4ca1 # kubernetes-1.11.2 +k8s.io/kube-openapi d8ea2fe547a448256204cfc68dfee7b26c720acb +k8s.io/kubernetes bb9ffb1654d4a729bb4cec18ff088eacc153c239 # v1.11.2 +vbom.ml/util 
256737ac55c46798123f754ab7d2c784e2c71783
+
+# DO NOT EDIT BELOW THIS LINE -------- reserved for downstream projects --------
diff --git a/src/cmd/linuxkit/vendor/github.com/docker/distribution/README.md b/src/cmd/linuxkit/vendor/github.com/docker/distribution/README.md
index 998878850..20d81eead 100644
--- a/src/cmd/linuxkit/vendor/github.com/docker/distribution/README.md
+++ b/src/cmd/linuxkit/vendor/github.com/docker/distribution/README.md
@@ -2,31 +2,32 @@ The Docker toolset to pack, ship, store, and deliver content.
 
-This repository's main product is the Docker Registry 2.0 implementation
-for storing and distributing Docker images. It supersedes the
-[docker/docker-registry](https://github.com/docker/docker-registry)
-project with a new API design, focused around security and performance.
+This repository's main product is the Open Source Docker Registry implementation
+for storing and distributing Docker and OCI images using the
+[OCI Distribution Specification](https://github.com/opencontainers/distribution-spec).
+The goal of this project is to provide a simple, secure, and scalable base
+for building a registry solution or running a simple private registry.
 
-[![Circle CI](https://circleci.com/gh/docker/distribution/tree/master.svg?style=svg)](https://circleci.com/gh/docker/distribution/tree/master)
+[![Build Status](https://travis-ci.org/docker/distribution.svg?branch=master)](https://travis-ci.org/docker/distribution)
 [![GoDoc](https://godoc.org/github.com/docker/distribution?status.svg)](https://godoc.org/github.com/docker/distribution)
 
 This repository contains the following components:
 
 |**Component**       |Description                                                                                                                                                                                          |
 |--------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| **registry**       | An implementation of the [Docker Registry HTTP API V2](docs/spec/api.md) for use with docker 1.6+. |
-| **libraries**      | A rich set of libraries for interacting with distribution components. Please see [godoc](https://godoc.org/github.com/docker/distribution) for details. **Note**: These libraries are **unstable**. |
-| **specifications** | _Distribution_ related specifications are available in [docs/spec](docs/spec) |
+| **registry**       | An implementation of the [OCI Distribution Specification](https://github.com/opencontainers/distribution-spec). |
+| **libraries**      | A rich set of libraries for interacting with distribution components. Please see [godoc](https://godoc.org/github.com/docker/distribution) for details. **Note**: The interfaces for these libraries are **unstable**. |
 | **documentation**  | Docker's full documentation set is available at [docs.docker.com](https://docs.docker.com). This repository [contains the subset](docs/) related just to the registry. |
 
-### How does this integrate with Docker engine?
+### How does this integrate with Docker, containerd, and other OCI clients?
 
-This project should provide an implementation to a V2 API for use in the [Docker
-core project](https://github.com/docker/docker). The API should be embeddable
-and simplify the process of securely pulling and pushing content from `docker`
-daemons.
+Clients implement against the OCI specification and communicate with the
+registry using HTTP. This project contains a client implementation which
+is currently in use by Docker; however, it is deprecated in favor of the
+[implementation in containerd](https://github.com/containerd/containerd/tree/master/remotes/docker)
+and will not support new features.
 
 ### What are the long term goals of the Distribution project?
 
@@ -43,18 +44,6 @@ system that allow users to:
 * Implement their own home made solution through good specs, and solid
 extensions mechanism.
 
-## More about Registry 2.0
-
-The new registry implementation provides the following benefits:
-
-- faster push and pull
-- new, more efficient implementation
-- simplified deployment
-- pluggable storage backend
-- webhook notifications
-
-For information on upcoming functionality, please see [ROADMAP.md](ROADMAP.md).
-
 ### Who needs to deploy a registry?
 
 By default, Docker users pull images from Docker's public registry instance.
@@ -78,53 +67,25 @@ For those who have previously deployed their own registry based on the Registry
 data migration is required. A tool to assist with migration efforts has been
 created. For more information see [docker/migrator](https://github.com/docker/migrator).
 
-## Contribute
+## Contribution
 
 Please see [CONTRIBUTING.md](CONTRIBUTING.md) for details on how to contribute
 issues, fixes, and patches to this project. If you are contributing code, see
 the instructions for [building a development environment](BUILDING.md).
 
-## Support
+## Communication
 
-If any issues are encountered while using the _Distribution_ project, several
-avenues are available for support:
+For async communication and long-running discussions please use issues and pull requests on the GitHub repo.
+This will be the best place to discuss design and implementation.
-
-- IRC: #docker-distribution on FreeNode
-- Issue Tracker: github.com/docker/distribution/issues
-- Google Groups: https://groups.google.com/a/dockerproject.org/forum/#!forum/distribution
-- Mailing List: docker@dockerproject.org
+For sync communication we have a community slack with a #distribution channel that everyone is welcome to join and chat about development. +**Slack:** Catch us in the #distribution channels on dockercommunity.slack.com. +[Click here for an invite to Docker community slack.](https://dockr.ly/slack) -## License +## Licenses -This project is distributed under [Apache License, Version 2.0](LICENSE). +The distribution codebase is released under the [Apache 2.0 license](LICENSE). +The README.md file, and files in the "docs" folder are licensed under the +Creative Commons Attribution 4.0 International License. You may obtain a +copy of the license, titled CC-BY-4.0, at http://creativecommons.org/licenses/by/4.0/. diff --git a/src/cmd/linuxkit/vendor/github.com/docker/distribution/blobs.go b/src/cmd/linuxkit/vendor/github.com/docker/distribution/blobs.go index 145b07853..2a659eaa3 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/distribution/blobs.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/distribution/blobs.go @@ -10,6 +10,7 @@ import ( "github.com/docker/distribution/reference" "github.com/opencontainers/go-digest" + v1 "github.com/opencontainers/image-spec/specs-go/v1" ) var ( @@ -66,12 +67,19 @@ type Descriptor struct { Size int64 `json:"size,omitempty"` // Digest uniquely identifies the content. A byte stream can be verified - // against against this digest. + // against this digest. Digest digest.Digest `json:"digest,omitempty"` // URLs contains the source URLs of this content. URLs []string `json:"urls,omitempty"` + // Annotations contains arbitrary metadata relating to the targeted content. + Annotations map[string]string `json:"annotations,omitempty"` + + // Platform describes the platform which the image in the manifest runs on. + // This should only be used when referring to a manifest. + Platform *v1.Platform `json:"platform,omitempty"` + // NOTE: Before adding a field here, please ensure that all // other options have been exhausted. Much of the type relationships // depend on the simplicity of this type. diff --git a/src/cmd/linuxkit/vendor/github.com/docker/distribution/context/context.go b/src/cmd/linuxkit/vendor/github.com/docker/distribution/context/context.go new file mode 100644 index 000000000..ab6865467 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/distribution/context/context.go @@ -0,0 +1,73 @@ +package context + +import ( + "context" + "sync" + + "github.com/docker/distribution/uuid" +) + +// instanceContext is a context that provides only an instance id. It is +// provided as the main background context. +type instanceContext struct { + context.Context + id string // id of context, logged as "instance.id" + once sync.Once // once protect generation of the id +} + +func (ic *instanceContext) Value(key interface{}) interface{} { + if key == "instance.id" { + ic.once.Do(func() { + // We want to lazy initialize the UUID such that we don't + // call a random generator from the package initialization + // code. For various reasons random could not be available + // https://github.com/docker/distribution/issues/782 + ic.id = uuid.Generate().String() + }) + return ic.id + } + + return ic.Context.Value(key) +} + +var background = &instanceContext{ + Context: context.Background(), +} + +// Background returns a non-nil, empty Context. The background context +// provides a single key, "instance.id" that is globally unique to the +// process. 
+func Background() context.Context { + return background +} + +// stringMapContext is a simple context implementation that checks a map for a +// key, falling back to a parent if not present. +type stringMapContext struct { + context.Context + m map[string]interface{} +} + +// WithValues returns a context that proxies lookups through a map. Only +// supports string keys. +func WithValues(ctx context.Context, m map[string]interface{}) context.Context { + mo := make(map[string]interface{}, len(m)) // make our own copy. + for k, v := range m { + mo[k] = v + } + + return stringMapContext{ + Context: ctx, + m: mo, + } +} + +func (smc stringMapContext) Value(key interface{}) interface{} { + if ks, ok := key.(string); ok { + if v, ok := smc.m[ks]; ok { + return v + } + } + + return smc.Context.Value(key) +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/distribution/context/doc.go b/src/cmd/linuxkit/vendor/github.com/docker/distribution/context/doc.go new file mode 100644 index 000000000..0c631a9c9 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/distribution/context/doc.go @@ -0,0 +1,88 @@ +// Package context provides several utilities for working with +// Go's context in http requests. Primarily, the focus is on logging relevant +// request information but this package is not limited to that purpose. +// +// The easiest way to get started is to get the background context: +// +// ctx := context.Background() +// +// The returned context should be passed around your application and be the +// root of all other context instances. If the application has a version, this +// line should be called before anything else: +// +// ctx := context.WithVersion(context.Background(), version) +// +// The above will store the version in the context and will be available to +// the logger. +// +// Logging +// +// The most useful aspect of this package is GetLogger. This function takes +// any context.Context interface and returns the current logger from the +// context. Canonical usage looks like this: +// +// GetLogger(ctx).Infof("something interesting happened") +// +// GetLogger also takes optional key arguments. The keys will be looked up in +// the context and reported with the logger. The following example would +// return a logger that prints the version with each log message: +// +// ctx := context.Context(context.Background(), "version", version) +// GetLogger(ctx, "version").Infof("this log message has a version field") +// +// The above would print out a log message like this: +// +// INFO[0000] this log message has a version field version=v2.0.0-alpha.2.m +// +// When used with WithLogger, we gain the ability to decorate the context with +// loggers that have information from disparate parts of the call stack. +// Following from the version example, we can build a new context with the +// configured logger such that we always print the version field: +// +// ctx = WithLogger(ctx, GetLogger(ctx, "version")) +// +// Since the logger has been pushed to the context, we can now get the version +// field for free with our log messages. Future calls to GetLogger on the new +// context will have the version field: +// +// GetLogger(ctx).Infof("this log message has a version field") +// +// This becomes more powerful when we start stacking loggers. Let's say we +// have the version logger from above but also want a request id. 
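The consumer's view of Background and WithValues as defined above: string keys are served from the copied map first, and everything else falls through to the parent chain. A small sketch, assuming the package's upstream import path github.com/docker/distribution/context:

```go
package main

import (
	"fmt"

	dcontext "github.com/docker/distribution/context"
)

func main() {
	ctx := dcontext.WithValues(dcontext.Background(), map[string]interface{}{
		"version": "v0.1.0", // illustrative value
	})

	// String keys hit the copied map first...
	fmt.Println(ctx.Value("version")) // v0.1.0

	// ...and anything else falls through to the parent chain, so the
	// lazily generated "instance.id" from Background() is still visible.
	fmt.Println(ctx.Value("instance.id"))
}
```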
Using the +// context above, in our request scoped function, we place another logger in +// the context: +// +// ctx = context.WithValue(ctx, "http.request.id", "unique id") // called when building request context +// ctx = WithLogger(ctx, GetLogger(ctx, "http.request.id")) +// +// When GetLogger is called on the new context, "http.request.id" will be +// included as a logger field, along with the original "version" field: +// +// INFO[0000] this log message has a version field http.request.id=unique id version=v2.0.0-alpha.2.m +// +// Note that this only affects the new context, the previous context, with the +// version field, can be used independently. Put another way, the new logger, +// added to the request context, is unique to that context and can have +// request scoped variables. +// +// HTTP Requests +// +// This package also contains several methods for working with http requests. +// The concepts are very similar to those described above. We simply place the +// request in the context using WithRequest. This makes the request variables +// available. GetRequestLogger can then be called to get request specific +// variables in a log line: +// +// ctx = WithRequest(ctx, req) +// GetRequestLogger(ctx).Infof("request variables") +// +// Like above, if we want to include the request data in all log messages in +// the context, we push the logger to a new context and use that one: +// +// ctx = WithLogger(ctx, GetRequestLogger(ctx)) +// +// The concept is fairly powerful and ensures that calls throughout the stack +// can be traced in log messages. Using the fields like "http.request.id", one +// can analyze call flow for a particular request with a simple grep of the +// logs. +package context diff --git a/src/cmd/linuxkit/vendor/github.com/docker/distribution/context/http.go b/src/cmd/linuxkit/vendor/github.com/docker/distribution/context/http.go new file mode 100644 index 000000000..bc22f0bba --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/distribution/context/http.go @@ -0,0 +1,337 @@ +package context + +import ( + "context" + "errors" + "net" + "net/http" + "strings" + "sync" + "time" + + "github.com/docker/distribution/uuid" + "github.com/gorilla/mux" + log "github.com/sirupsen/logrus" +) + +// Common errors used with this package. +var ( + ErrNoRequestContext = errors.New("no http request in context") + ErrNoResponseWriterContext = errors.New("no http response in context") +) + +func parseIP(ipStr string) net.IP { + ip := net.ParseIP(ipStr) + if ip == nil { + log.Warnf("invalid remote IP address: %q", ipStr) + } + return ip +} + +// RemoteAddr extracts the remote address of the request, taking into +// account proxy headers. +func RemoteAddr(r *http.Request) string { + if prior := r.Header.Get("X-Forwarded-For"); prior != "" { + proxies := strings.Split(prior, ",") + if len(proxies) > 0 { + remoteAddr := strings.Trim(proxies[0], " ") + if parseIP(remoteAddr) != nil { + return remoteAddr + } + } + } + // X-Real-Ip is less supported, but worth checking in the + // absence of X-Forwarded-For + if realIP := r.Header.Get("X-Real-Ip"); realIP != "" { + if parseIP(realIP) != nil { + return realIP + } + } + + return r.RemoteAddr +} + +// RemoteIP extracts the remote IP of the request, taking into +// account proxy headers. +func RemoteIP(r *http.Request) string { + addr := RemoteAddr(r) + + // Try parsing it as "IP:port" + if ip, _, err := net.SplitHostPort(addr); err == nil { + return ip + } + + return addr +} + +// WithRequest places the request on the context. 
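RemoteAddr and RemoteIP above prefer proxy headers over the socket address. A sketch of that behavior; the addresses and header values are illustrative:

```go
package main

import (
	"fmt"
	"net/http"

	dcontext "github.com/docker/distribution/context"
)

func main() {
	r, _ := http.NewRequest("GET", "http://registry.example/v2/", nil)
	r.RemoteAddr = "10.0.0.1:52412"

	fmt.Println(dcontext.RemoteIP(r)) // 10.0.0.1 (no proxy headers yet)

	// The first entry of X-Forwarded-For wins when it parses as an IP.
	r.Header.Set("X-Forwarded-For", "203.0.113.7, 192.0.2.1")
	fmt.Println(dcontext.RemoteAddr(r)) // 203.0.113.7
	fmt.Println(dcontext.RemoteIP(r))   // 203.0.113.7
}
```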
The context of the request +// is assigned a unique id, available at "http.request.id". The request itself +// is available at "http.request". Other common attributes are available under +// the prefix "http.request.". If a request is already present on the context, +// this method will panic. +func WithRequest(ctx context.Context, r *http.Request) context.Context { + if ctx.Value("http.request") != nil { + // NOTE(stevvooe): This needs to be considered a programming error. It + // is unlikely that we'd want to have more than one request in + // context. + panic("only one request per context") + } + + return &httpRequestContext{ + Context: ctx, + startedAt: time.Now(), + id: uuid.Generate().String(), + r: r, + } +} + +// GetRequest returns the http request in the given context. Returns +// ErrNoRequestContext if the context does not have an http request associated +// with it. +func GetRequest(ctx context.Context) (*http.Request, error) { + if r, ok := ctx.Value("http.request").(*http.Request); r != nil && ok { + return r, nil + } + return nil, ErrNoRequestContext +} + +// GetRequestID attempts to resolve the current request id, if possible. An +// error is return if it is not available on the context. +func GetRequestID(ctx context.Context) string { + return GetStringValue(ctx, "http.request.id") +} + +// WithResponseWriter returns a new context and response writer that makes +// interesting response statistics available within the context. +func WithResponseWriter(ctx context.Context, w http.ResponseWriter) (context.Context, http.ResponseWriter) { + irw := instrumentedResponseWriter{ + ResponseWriter: w, + Context: ctx, + } + return &irw, &irw +} + +// GetResponseWriter returns the http.ResponseWriter from the provided +// context. If not present, ErrNoResponseWriterContext is returned. The +// returned instance provides instrumentation in the context. +func GetResponseWriter(ctx context.Context) (http.ResponseWriter, error) { + v := ctx.Value("http.response") + + rw, ok := v.(http.ResponseWriter) + if !ok || rw == nil { + return nil, ErrNoResponseWriterContext + } + + return rw, nil +} + +// getVarsFromRequest let's us change request vars implementation for testing +// and maybe future changes. +var getVarsFromRequest = mux.Vars + +// WithVars extracts gorilla/mux vars and makes them available on the returned +// context. Variables are available at keys with the prefix "vars.". For +// example, if looking for the variable "name", it can be accessed as +// "vars.name". Implementations that are accessing values need not know that +// the underlying context is implemented with gorilla/mux vars. +func WithVars(ctx context.Context, r *http.Request) context.Context { + return &muxVarsContext{ + Context: ctx, + vars: getVarsFromRequest(r), + } +} + +// GetRequestLogger returns a logger that contains fields from the request in +// the current context. If the request is not available in the context, no +// fields will display. Request loggers can safely be pushed onto the context. +func GetRequestLogger(ctx context.Context) Logger { + return GetLogger(ctx, + "http.request.id", + "http.request.method", + "http.request.host", + "http.request.uri", + "http.request.referer", + "http.request.useragent", + "http.request.remoteaddr", + "http.request.contenttype") +} + +// GetResponseLogger reads the current response stats and builds a logger. +// Because the values are read at call time, pushing a logger returned from +// this function on the context will lead to missing or invalid data. 
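A hedged sketch of the intended request flow: WithRequest assigns the request id, and GetRequestLogger resolves the http.request.* fields on each call. The handler wiring and listen address are illustrative:

```go
package main

import (
	"net/http"

	dcontext "github.com/docker/distribution/context"
)

func handler(w http.ResponseWriter, r *http.Request) {
	// Attach the request; this also assigns the "http.request.id" value.
	ctx := dcontext.WithRequest(dcontext.Background(), r)

	// The request logger resolves fields such as http.request.method and
	// http.request.id from the context every time it is called.
	dcontext.GetRequestLogger(ctx).Infof("handling %s", dcontext.GetRequestID(ctx))
	w.WriteHeader(http.StatusNoContent)
}

func main() {
	http.HandleFunc("/v2/", handler)
	http.ListenAndServe("127.0.0.1:8080", nil) // illustrative address
}
```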
Only +// call this at the end of a request, after the response has been written. +func GetResponseLogger(ctx context.Context) Logger { + l := getLogrusLogger(ctx, + "http.response.written", + "http.response.status", + "http.response.contenttype") + + duration := Since(ctx, "http.request.startedat") + + if duration > 0 { + l = l.WithField("http.response.duration", duration.String()) + } + + return l +} + +// httpRequestContext makes information about a request available to context. +type httpRequestContext struct { + context.Context + + startedAt time.Time + id string + r *http.Request +} + +// Value returns a keyed element of the request for use in the context. To get +// the request itself, query "request". For other components, access them as +// "request.". For example, r.RequestURI +func (ctx *httpRequestContext) Value(key interface{}) interface{} { + if keyStr, ok := key.(string); ok { + if keyStr == "http.request" { + return ctx.r + } + + if !strings.HasPrefix(keyStr, "http.request.") { + goto fallback + } + + parts := strings.Split(keyStr, ".") + + if len(parts) != 3 { + goto fallback + } + + switch parts[2] { + case "uri": + return ctx.r.RequestURI + case "remoteaddr": + return RemoteAddr(ctx.r) + case "method": + return ctx.r.Method + case "host": + return ctx.r.Host + case "referer": + referer := ctx.r.Referer() + if referer != "" { + return referer + } + case "useragent": + return ctx.r.UserAgent() + case "id": + return ctx.id + case "startedat": + return ctx.startedAt + case "contenttype": + ct := ctx.r.Header.Get("Content-Type") + if ct != "" { + return ct + } + } + } + +fallback: + return ctx.Context.Value(key) +} + +type muxVarsContext struct { + context.Context + vars map[string]string +} + +func (ctx *muxVarsContext) Value(key interface{}) interface{} { + if keyStr, ok := key.(string); ok { + if keyStr == "vars" { + return ctx.vars + } + + if strings.HasPrefix(keyStr, "vars.") { + keyStr = strings.TrimPrefix(keyStr, "vars.") + } + + if v, ok := ctx.vars[keyStr]; ok { + return v + } + } + + return ctx.Context.Value(key) +} + +// instrumentedResponseWriter provides response writer information in a +// context. This variant is only used in the case where CloseNotifier is not +// implemented by the parent ResponseWriter. +type instrumentedResponseWriter struct { + http.ResponseWriter + context.Context + + mu sync.Mutex + status int + written int64 +} + +func (irw *instrumentedResponseWriter) Write(p []byte) (n int, err error) { + n, err = irw.ResponseWriter.Write(p) + + irw.mu.Lock() + irw.written += int64(n) + + // Guess the likely status if not set. 
+ if irw.status == 0 { + irw.status = http.StatusOK + } + + irw.mu.Unlock() + + return +} + +func (irw *instrumentedResponseWriter) WriteHeader(status int) { + irw.ResponseWriter.WriteHeader(status) + + irw.mu.Lock() + irw.status = status + irw.mu.Unlock() +} + +func (irw *instrumentedResponseWriter) Flush() { + if flusher, ok := irw.ResponseWriter.(http.Flusher); ok { + flusher.Flush() + } +} + +func (irw *instrumentedResponseWriter) Value(key interface{}) interface{} { + if keyStr, ok := key.(string); ok { + if keyStr == "http.response" { + return irw + } + + if !strings.HasPrefix(keyStr, "http.response.") { + goto fallback + } + + parts := strings.Split(keyStr, ".") + + if len(parts) != 3 { + goto fallback + } + + irw.mu.Lock() + defer irw.mu.Unlock() + + switch parts[2] { + case "written": + return irw.written + case "status": + return irw.status + case "contenttype": + contentType := irw.Header().Get("Content-Type") + if contentType != "" { + return contentType + } + } + } + +fallback: + return irw.Context.Value(key) +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/distribution/context/logger.go b/src/cmd/linuxkit/vendor/github.com/docker/distribution/context/logger.go new file mode 100644 index 000000000..3e5b81bbf --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/distribution/context/logger.go @@ -0,0 +1,121 @@ +package context + +import ( + "context" + "fmt" + "runtime" + + "github.com/sirupsen/logrus" +) + +// Logger provides a leveled-logging interface. +type Logger interface { + // standard logger methods + Print(args ...interface{}) + Printf(format string, args ...interface{}) + Println(args ...interface{}) + + Fatal(args ...interface{}) + Fatalf(format string, args ...interface{}) + Fatalln(args ...interface{}) + + Panic(args ...interface{}) + Panicf(format string, args ...interface{}) + Panicln(args ...interface{}) + + // Leveled methods, from logrus + Debug(args ...interface{}) + Debugf(format string, args ...interface{}) + Debugln(args ...interface{}) + + Error(args ...interface{}) + Errorf(format string, args ...interface{}) + Errorln(args ...interface{}) + + Info(args ...interface{}) + Infof(format string, args ...interface{}) + Infoln(args ...interface{}) + + Warn(args ...interface{}) + Warnf(format string, args ...interface{}) + Warnln(args ...interface{}) + + WithError(err error) *logrus.Entry +} + +type loggerKey struct{} + +// WithLogger creates a new context with provided logger. +func WithLogger(ctx context.Context, logger Logger) context.Context { + return context.WithValue(ctx, loggerKey{}, logger) +} + +// GetLoggerWithField returns a logger instance with the specified field key +// and value without affecting the context. Extra specified keys will be +// resolved from the context. +func GetLoggerWithField(ctx context.Context, key, value interface{}, keys ...interface{}) Logger { + return getLogrusLogger(ctx, keys...).WithField(fmt.Sprint(key), value) +} + +// GetLoggerWithFields returns a logger instance with the specified fields +// without affecting the context. Extra specified keys will be resolved from +// the context. +func GetLoggerWithFields(ctx context.Context, fields map[interface{}]interface{}, keys ...interface{}) Logger { + // must convert from interface{} -> interface{} to string -> interface{} for logrus. 
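Tying the instrumented writer to the response logger, per the doc comment above that says to read the stats only after the response is written. A minimal sketch with the same assumed import path; the httptest harness is only there to drive the handler:

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	dcontext "github.com/docker/distribution/context"
)

func instrumented(w http.ResponseWriter, r *http.Request) {
	ctx := dcontext.WithRequest(dcontext.Background(), r)

	// Swap in the instrumented writer so bytes written and status are
	// recorded in the context under the "http.response.*" keys.
	ctx, w = dcontext.WithResponseWriter(ctx, w)

	w.Write([]byte("ok")) // status is inferred as 200 when WriteHeader is never called

	// Read the stats only after the response has been written.
	dcontext.GetResponseLogger(ctx).Infof("request complete")
	fmt.Println(ctx.Value("http.response.written")) // 2
}

func main() {
	srv := httptest.NewServer(http.HandlerFunc(instrumented))
	defer srv.Close()
	http.Get(srv.URL)
}
```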
+ lfields := make(logrus.Fields, len(fields)) + for key, value := range fields { + lfields[fmt.Sprint(key)] = value + } + + return getLogrusLogger(ctx, keys...).WithFields(lfields) +} + +// GetLogger returns the logger from the current context, if present. If one +// or more keys are provided, they will be resolved on the context and +// included in the logger. While context.Value takes an interface, any key +// argument passed to GetLogger will be passed to fmt.Sprint when expanded as +// a logging key field. If context keys are integer constants, for example, +// its recommended that a String method is implemented. +func GetLogger(ctx context.Context, keys ...interface{}) Logger { + return getLogrusLogger(ctx, keys...) +} + +// GetLogrusLogger returns the logrus logger for the context. If one more keys +// are provided, they will be resolved on the context and included in the +// logger. Only use this function if specific logrus functionality is +// required. +func getLogrusLogger(ctx context.Context, keys ...interface{}) *logrus.Entry { + var logger *logrus.Entry + + // Get a logger, if it is present. + loggerInterface := ctx.Value(loggerKey{}) + if loggerInterface != nil { + if lgr, ok := loggerInterface.(*logrus.Entry); ok { + logger = lgr + } + } + + if logger == nil { + fields := logrus.Fields{} + + // Fill in the instance id, if we have it. + instanceID := ctx.Value("instance.id") + if instanceID != nil { + fields["instance.id"] = instanceID + } + + fields["go.version"] = runtime.Version() + // If no logger is found, just return the standard logger. + logger = logrus.StandardLogger().WithFields(fields) + } + + fields := logrus.Fields{} + for _, key := range keys { + v := ctx.Value(key) + if v != nil { + fields[fmt.Sprint(key)] = v + } + } + + return logger.WithFields(fields) +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/distribution/context/trace.go b/src/cmd/linuxkit/vendor/github.com/docker/distribution/context/trace.go new file mode 100644 index 000000000..5b88ddaf4 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/distribution/context/trace.go @@ -0,0 +1,105 @@ +package context + +import ( + "context" + "runtime" + "time" + + "github.com/docker/distribution/uuid" +) + +// WithTrace allocates a traced timing span in a new context. This allows a +// caller to track the time between calling WithTrace and the returned done +// function. When the done function is called, a log message is emitted with a +// "trace.duration" field, corresponding to the elapsed time and a +// "trace.func" field, corresponding to the function that called WithTrace. +// +// The logging keys "trace.id" and "trace.parent.id" are provided to implement +// dapper-like tracing. This function should be complemented with a WithSpan +// method that could be used for tracing distributed RPC calls. +// +// The main benefit of this function is to post-process log messages or +// intercept them in a hook to provide timing data. Trace ids and parent ids +// can also be linked to provide call tracing, if so required. +// +// Here is an example of the usage: +// +// func timedOperation(ctx Context) { +// ctx, done := WithTrace(ctx) +// defer done("this will be the log message") +// // ... function body ... +// } +// +// If the function ran for roughly 1s, such a usage would emit a log message +// as follows: +// +// INFO[0001] this will be the log message trace.duration=1.004575763s trace.func=github.com/docker/distribution/context.traceOperation trace.id= ... 
+// +// Notice that the function name is automatically resolved, along with the +// package and a trace id is emitted that can be linked with parent ids. +func WithTrace(ctx context.Context) (context.Context, func(format string, a ...interface{})) { + if ctx == nil { + ctx = Background() + } + + pc, file, line, _ := runtime.Caller(1) + f := runtime.FuncForPC(pc) + ctx = &traced{ + Context: ctx, + id: uuid.Generate().String(), + start: time.Now(), + parent: GetStringValue(ctx, "trace.id"), + fnname: f.Name(), + file: file, + line: line, + } + + return ctx, func(format string, a ...interface{}) { + GetLogger(ctx, + "trace.duration", + "trace.id", + "trace.parent.id", + "trace.func", + "trace.file", + "trace.line"). + Debugf(format, a...) + } +} + +// traced represents a context that is traced for function call timing. It +// also provides fast lookup for the various attributes that are available on +// the trace. +type traced struct { + context.Context + id string + parent string + start time.Time + fnname string + file string + line int +} + +func (ts *traced) Value(key interface{}) interface{} { + switch key { + case "trace.start": + return ts.start + case "trace.duration": + return time.Since(ts.start) + case "trace.id": + return ts.id + case "trace.parent.id": + if ts.parent == "" { + return nil // must return nil to signal no parent. + } + + return ts.parent + case "trace.func": + return ts.fnname + case "trace.file": + return ts.file + case "trace.line": + return ts.line + } + + return ts.Context.Value(key) +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/distribution/context/util.go b/src/cmd/linuxkit/vendor/github.com/docker/distribution/context/util.go new file mode 100644 index 000000000..c462e7563 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/distribution/context/util.go @@ -0,0 +1,25 @@ +package context + +import ( + "context" + "time" +) + +// Since looks up key, which should be a time.Time, and returns the duration +// since that time. If the key is not found, the value returned will be zero. +// This is helpful when inferring metrics related to context execution times. +func Since(ctx context.Context, key interface{}) time.Duration { + if startedAt, ok := ctx.Value(key).(time.Time); ok { + return time.Since(startedAt) + } + return 0 +} + +// GetStringValue returns a string value from the context. The empty string +// will be returned if not found. +func GetStringValue(ctx context.Context, key interface{}) (value string) { + if valuev, ok := ctx.Value(key).(string); ok { + value = valuev + } + return value +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/distribution/context/version.go b/src/cmd/linuxkit/vendor/github.com/docker/distribution/context/version.go new file mode 100644 index 000000000..97cf9d665 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/distribution/context/version.go @@ -0,0 +1,22 @@ +package context + +import "context" + +type versionKey struct{} + +func (versionKey) String() string { return "version" } + +// WithVersion stores the application version in the context. The new context +// gets a logger to ensure log messages are marked with the application +// version. +func WithVersion(ctx context.Context, version string) context.Context { + ctx = context.WithValue(ctx, versionKey{}, version) + // push a new logger onto the stack + return WithLogger(ctx, GetLogger(ctx, versionKey{})) +} + +// GetVersion returns the application version from the context. 
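WithVersion and WithTrace compose with the logger plumbing shown earlier. A short sketch (same assumed import path; the version string is illustrative):

```go
package main

import (
	"fmt"

	dcontext "github.com/docker/distribution/context"
)

func main() {
	// Stamp the version once, near process start; the logger pushed by
	// WithVersion reports it on every subsequent log line.
	ctx := dcontext.WithVersion(dcontext.Background(), "v2.7.1")
	fmt.Println(dcontext.GetVersion(ctx)) // v2.7.1

	// WithTrace times a span and logs it, at debug level, when done() runs.
	ctx, done := dcontext.WithTrace(ctx)
	defer done("span finished")
	dcontext.GetLogger(ctx).Infof("inside the traced span")
}
```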
An empty
+// string may be returned if the version was not set on the context.
+func GetVersion(ctx context.Context) string {
+	return GetStringValue(ctx, versionKey{})
+}
diff --git a/src/cmd/linuxkit/vendor/github.com/docker/distribution/errors.go b/src/cmd/linuxkit/vendor/github.com/docker/distribution/errors.go
index 020d33258..8e0b788d6 100644
--- a/src/cmd/linuxkit/vendor/github.com/docker/distribution/errors.go
+++ b/src/cmd/linuxkit/vendor/github.com/docker/distribution/errors.go
@@ -20,6 +20,10 @@ var ErrManifestNotModified = errors.New("manifest not modified")
 // performed
 var ErrUnsupported = errors.New("operation unsupported")
 
+// ErrSchemaV1Unsupported is returned when a client tries to upload a schema v1
+// manifest but the registry is configured to reject it
+var ErrSchemaV1Unsupported = errors.New("manifest schema v1 unsupported")
+
 // ErrTagUnknown is returned if the given tag is not known by the tag service
 type ErrTagUnknown struct {
 	Tag string
diff --git a/src/cmd/linuxkit/vendor/github.com/docker/distribution/go.mod b/src/cmd/linuxkit/vendor/github.com/docker/distribution/go.mod
new file mode 100644
index 000000000..7f8052ee5
--- /dev/null
+++ b/src/cmd/linuxkit/vendor/github.com/docker/distribution/go.mod
@@ -0,0 +1,50 @@
+module github.com/docker/distribution
+
+go 1.12
+
+require (
+	github.com/Azure/azure-sdk-for-go v16.2.1+incompatible
+	github.com/Azure/go-autorest v10.8.1+incompatible // indirect
+	github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d
+	github.com/aws/aws-sdk-go v1.15.11
+	github.com/bitly/go-simplejson v0.5.0 // indirect
+	github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 // indirect
+	github.com/bshuster-repo/logrus-logstash-hook v0.4.1
+	github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd
+	github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b // indirect
+	github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0 // indirect
+	github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba
+	github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c // indirect
+	github.com/dnaeon/go-vcr v1.0.1 // indirect
+	github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c
+	github.com/docker/go-metrics v0.0.1
+	github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1
+	github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7
+	github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33
+	github.com/gorilla/mux v1.7.2
+	github.com/inconshreveable/mousetrap v1.0.0 // indirect
+	github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7 // indirect
+	github.com/kr/pretty v0.1.0 // indirect
+	github.com/marstr/guid v1.1.0 // indirect
+	github.com/mitchellh/mapstructure v1.1.2
+	github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f // indirect
+	github.com/ncw/swift v1.0.47
+	github.com/opencontainers/go-digest v1.0.0-rc1
+	github.com/opencontainers/image-spec v1.0.1
+	github.com/satori/go.uuid v1.2.0 // indirect
+	github.com/sirupsen/logrus v1.4.2
+	github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a // indirect
+	github.com/spf13/cobra v0.0.3
+	github.com/spf13/pflag v1.0.3 // indirect
+	github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43 // indirect
+	github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50
+	github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f // indirect
+	golang.org/x/crypto v0.0.0-20200128174031-69ecbb4d6d5d
+	golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b // indirect
+
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 + google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff + google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8 + google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a // indirect + gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789 + gopkg.in/yaml.v2 v2.2.2 +) diff --git a/src/cmd/linuxkit/vendor/github.com/docker/distribution/manifest/manifestlist/manifestlist.go b/src/cmd/linuxkit/vendor/github.com/docker/distribution/manifest/manifestlist/manifestlist.go index 3aa0662d9..3a1d73e83 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/distribution/manifest/manifestlist/manifestlist.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/distribution/manifest/manifestlist/manifestlist.go @@ -8,10 +8,13 @@ import ( "github.com/docker/distribution" "github.com/docker/distribution/manifest" "github.com/opencontainers/go-digest" + v1 "github.com/opencontainers/image-spec/specs-go/v1" ) -// MediaTypeManifestList specifies the mediaType for manifest lists. -const MediaTypeManifestList = "application/vnd.docker.distribution.manifest.list.v2+json" +const ( + // MediaTypeManifestList specifies the mediaType for manifest lists. + MediaTypeManifestList = "application/vnd.docker.distribution.manifest.list.v2+json" +) // SchemaVersion provides a pre-initialized version structure for this // packages version of the manifest. @@ -20,6 +23,13 @@ var SchemaVersion = manifest.Versioned{ MediaType: MediaTypeManifestList, } +// OCISchemaVersion provides a pre-initialized version structure for this +// packages OCIschema version of the manifest. +var OCISchemaVersion = manifest.Versioned{ + SchemaVersion: 2, + MediaType: v1.MediaTypeImageIndex, +} + func init() { manifestListFunc := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) { m := new(DeserializedManifestList) @@ -28,6 +38,13 @@ func init() { return nil, distribution.Descriptor{}, err } + if m.MediaType != MediaTypeManifestList { + err = fmt.Errorf("mediaType in manifest list should be '%s' not '%s'", + MediaTypeManifestList, m.MediaType) + + return nil, distribution.Descriptor{}, err + } + dgst := digest.FromBytes(b) return m, distribution.Descriptor{Digest: dgst, Size: int64(len(b)), MediaType: MediaTypeManifestList}, err } @@ -35,6 +52,28 @@ func init() { if err != nil { panic(fmt.Sprintf("Unable to register manifest: %s", err)) } + + imageIndexFunc := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) { + m := new(DeserializedManifestList) + err := m.UnmarshalJSON(b) + if err != nil { + return nil, distribution.Descriptor{}, err + } + + if m.MediaType != "" && m.MediaType != v1.MediaTypeImageIndex { + err = fmt.Errorf("if present, mediaType in image index should be '%s' not '%s'", + v1.MediaTypeImageIndex, m.MediaType) + + return nil, distribution.Descriptor{}, err + } + + dgst := digest.FromBytes(b) + return m, distribution.Descriptor{Digest: dgst, Size: int64(len(b)), MediaType: v1.MediaTypeImageIndex}, err + } + err = distribution.RegisterManifestSchema(v1.MediaTypeImageIndex, imageIndexFunc) + if err != nil { + panic(fmt.Sprintf("Unable to register OCI Image Index: %s", err)) + } } // PlatformSpec specifies a platform where a particular image manifest is @@ -105,11 +144,26 @@ type DeserializedManifestList struct { // DeserializedManifestList which contains the resulting manifest list // and its JSON representation. 
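The init() registrations above route callers through distribution.UnmarshalManifest, which now cross-checks the mediaType embedded in the payload against the schema selected by the Content-Type. A sketch with a minimal, illustrative payload:

```go
package main

import (
	"fmt"

	"github.com/docker/distribution"
	"github.com/docker/distribution/manifest/manifestlist"
)

func main() {
	payload := []byte(`{
	  "schemaVersion": 2,
	  "mediaType": "application/vnd.docker.distribution.manifest.list.v2+json",
	  "manifests": []
	}`)

	// The registered unmarshal func rejects payloads whose embedded
	// mediaType does not match the schema it was registered under.
	m, desc, err := distribution.UnmarshalManifest(manifestlist.MediaTypeManifestList, payload)
	if err != nil {
		panic(err)
	}
	fmt.Println(desc.MediaType, desc.Size, len(m.References()))
}
```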
func FromDescriptors(descriptors []ManifestDescriptor) (*DeserializedManifestList, error) { - m := ManifestList{ - Versioned: SchemaVersion, + var mediaType string + if len(descriptors) > 0 && descriptors[0].Descriptor.MediaType == v1.MediaTypeImageManifest { + mediaType = v1.MediaTypeImageIndex + } else { + mediaType = MediaTypeManifestList } - m.Manifests = make([]ManifestDescriptor, len(descriptors), len(descriptors)) + return FromDescriptorsWithMediaType(descriptors, mediaType) +} + +// FromDescriptorsWithMediaType is for testing purposes, it's useful to be able to specify the media type explicitly +func FromDescriptorsWithMediaType(descriptors []ManifestDescriptor, mediaType string) (*DeserializedManifestList, error) { + m := ManifestList{ + Versioned: manifest.Versioned{ + SchemaVersion: 2, + MediaType: mediaType, + }, + } + + m.Manifests = make([]ManifestDescriptor, len(descriptors)) copy(m.Manifests, descriptors) deserialized := DeserializedManifestList{ @@ -123,7 +177,7 @@ func FromDescriptors(descriptors []ManifestDescriptor) (*DeserializedManifestLis // UnmarshalJSON populates a new ManifestList struct from JSON data. func (m *DeserializedManifestList) UnmarshalJSON(b []byte) error { - m.canonical = make([]byte, len(b), len(b)) + m.canonical = make([]byte, len(b)) // store manifest list in canonical copy(m.canonical, b) @@ -151,5 +205,12 @@ func (m *DeserializedManifestList) MarshalJSON() ([]byte, error) { // Payload returns the raw content of the manifest list. The contents can be // used to calculate the content identifier. func (m DeserializedManifestList) Payload() (string, []byte, error) { - return m.MediaType, m.canonical, nil + var mediaType string + if m.MediaType == "" { + mediaType = v1.MediaTypeImageIndex + } else { + mediaType = m.MediaType + } + + return mediaType, m.canonical, nil } diff --git a/src/cmd/linuxkit/vendor/github.com/docker/distribution/manifest/ocischema/builder.go b/src/cmd/linuxkit/vendor/github.com/docker/distribution/manifest/ocischema/builder.go new file mode 100644 index 000000000..b89bf5b71 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/distribution/manifest/ocischema/builder.go @@ -0,0 +1,107 @@ +package ocischema + +import ( + "context" + "errors" + + "github.com/docker/distribution" + "github.com/docker/distribution/manifest" + "github.com/opencontainers/go-digest" + v1 "github.com/opencontainers/image-spec/specs-go/v1" +) + +// Builder is a type for constructing manifests. +type Builder struct { + // bs is a BlobService used to publish the configuration blob. + bs distribution.BlobService + + // configJSON references + configJSON []byte + + // layers is a list of layer descriptors that gets built by successive + // calls to AppendReference. + layers []distribution.Descriptor + + // Annotations contains arbitrary metadata relating to the targeted content. + annotations map[string]string + + // For testing purposes + mediaType string +} + +// NewManifestBuilder is used to build new manifests for the current schema +// version. It takes a BlobService so it can publish the configuration blob +// as part of the Build process, and annotations. 
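FromDescriptors now infers the output type from the first entry: an OCI image manifest descriptor yields an OCI index, anything else a Docker manifest list. A sketch; the digest is the well-known sha256 of empty input, used purely as a placeholder:

```go
package main

import (
	"fmt"

	"github.com/docker/distribution"
	"github.com/docker/distribution/manifest/manifestlist"
	"github.com/opencontainers/go-digest"
	v1 "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	descs := []manifestlist.ManifestDescriptor{{
		Descriptor: distribution.Descriptor{
			MediaType: v1.MediaTypeImageManifest, // OCI entry, so an OCI index is built
			Size:      1024,                      // placeholder size
			Digest:    digest.Digest("sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"),
		},
		Platform: manifestlist.PlatformSpec{Architecture: "amd64", OS: "linux"},
	}}

	ml, err := manifestlist.FromDescriptors(descs)
	if err != nil {
		panic(err)
	}
	mt, _, _ := ml.Payload()
	fmt.Println(mt) // application/vnd.oci.image.index.v1+json
}
```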
+func NewManifestBuilder(bs distribution.BlobService, configJSON []byte, annotations map[string]string) distribution.ManifestBuilder { + mb := &Builder{ + bs: bs, + configJSON: make([]byte, len(configJSON)), + annotations: annotations, + mediaType: v1.MediaTypeImageManifest, + } + copy(mb.configJSON, configJSON) + + return mb +} + +// SetMediaType assigns the passed mediatype or error if the mediatype is not a +// valid media type for oci image manifests currently: "" or "application/vnd.oci.image.manifest.v1+json" +func (mb *Builder) SetMediaType(mediaType string) error { + if mediaType != "" && mediaType != v1.MediaTypeImageManifest { + return errors.New("invalid media type for OCI image manifest") + } + + mb.mediaType = mediaType + return nil +} + +// Build produces a final manifest from the given references. +func (mb *Builder) Build(ctx context.Context) (distribution.Manifest, error) { + m := Manifest{ + Versioned: manifest.Versioned{ + SchemaVersion: 2, + MediaType: mb.mediaType, + }, + Layers: make([]distribution.Descriptor, len(mb.layers)), + Annotations: mb.annotations, + } + copy(m.Layers, mb.layers) + + configDigest := digest.FromBytes(mb.configJSON) + + var err error + m.Config, err = mb.bs.Stat(ctx, configDigest) + switch err { + case nil: + // Override MediaType, since Put always replaces the specified media + // type with application/octet-stream in the descriptor it returns. + m.Config.MediaType = v1.MediaTypeImageConfig + return FromStruct(m) + case distribution.ErrBlobUnknown: + // nop + default: + return nil, err + } + + // Add config to the blob store + m.Config, err = mb.bs.Put(ctx, v1.MediaTypeImageConfig, mb.configJSON) + // Override MediaType, since Put always replaces the specified media + // type with application/octet-stream in the descriptor it returns. + m.Config.MediaType = v1.MediaTypeImageConfig + if err != nil { + return nil, err + } + + return FromStruct(m) +} + +// AppendReference adds a reference to the current ManifestBuilder. +func (mb *Builder) AppendReference(d distribution.Describable) error { + mb.layers = append(mb.layers, d.Descriptor()) + return nil +} + +// References returns the current references added to this builder. +func (mb *Builder) References() []distribution.Descriptor { + return mb.layers +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/distribution/manifest/ocischema/manifest.go b/src/cmd/linuxkit/vendor/github.com/docker/distribution/manifest/ocischema/manifest.go new file mode 100644 index 000000000..c5e85f285 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/distribution/manifest/ocischema/manifest.go @@ -0,0 +1,124 @@ +package ocischema + +import ( + "encoding/json" + "errors" + "fmt" + + "github.com/docker/distribution" + "github.com/docker/distribution/manifest" + "github.com/opencontainers/go-digest" + v1 "github.com/opencontainers/image-spec/specs-go/v1" +) + +var ( + // SchemaVersion provides a pre-initialized version structure for this + // packages version of the manifest. + SchemaVersion = manifest.Versioned{ + SchemaVersion: 2, // historical value here.. 
does not pertain to OCI or docker version + MediaType: v1.MediaTypeImageManifest, + } +) + +func init() { + ocischemaFunc := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) { + m := new(DeserializedManifest) + err := m.UnmarshalJSON(b) + if err != nil { + return nil, distribution.Descriptor{}, err + } + + dgst := digest.FromBytes(b) + return m, distribution.Descriptor{Digest: dgst, Size: int64(len(b)), MediaType: v1.MediaTypeImageManifest}, err + } + err := distribution.RegisterManifestSchema(v1.MediaTypeImageManifest, ocischemaFunc) + if err != nil { + panic(fmt.Sprintf("Unable to register manifest: %s", err)) + } +} + +// Manifest defines a ocischema manifest. +type Manifest struct { + manifest.Versioned + + // Config references the image configuration as a blob. + Config distribution.Descriptor `json:"config"` + + // Layers lists descriptors for the layers referenced by the + // configuration. + Layers []distribution.Descriptor `json:"layers"` + + // Annotations contains arbitrary metadata for the image manifest. + Annotations map[string]string `json:"annotations,omitempty"` +} + +// References returns the descriptors of this manifests references. +func (m Manifest) References() []distribution.Descriptor { + references := make([]distribution.Descriptor, 0, 1+len(m.Layers)) + references = append(references, m.Config) + references = append(references, m.Layers...) + return references +} + +// Target returns the target of this manifest. +func (m Manifest) Target() distribution.Descriptor { + return m.Config +} + +// DeserializedManifest wraps Manifest with a copy of the original JSON. +// It satisfies the distribution.Manifest interface. +type DeserializedManifest struct { + Manifest + + // canonical is the canonical byte representation of the Manifest. + canonical []byte +} + +// FromStruct takes a Manifest structure, marshals it to JSON, and returns a +// DeserializedManifest which contains the manifest and its JSON representation. +func FromStruct(m Manifest) (*DeserializedManifest, error) { + var deserialized DeserializedManifest + deserialized.Manifest = m + + var err error + deserialized.canonical, err = json.MarshalIndent(&m, "", " ") + return &deserialized, err +} + +// UnmarshalJSON populates a new Manifest struct from JSON data. +func (m *DeserializedManifest) UnmarshalJSON(b []byte) error { + m.canonical = make([]byte, len(b)) + // store manifest in canonical + copy(m.canonical, b) + + // Unmarshal canonical JSON into Manifest object + var manifest Manifest + if err := json.Unmarshal(m.canonical, &manifest); err != nil { + return err + } + + if manifest.MediaType != "" && manifest.MediaType != v1.MediaTypeImageManifest { + return fmt.Errorf("if present, mediaType in manifest should be '%s' not '%s'", + v1.MediaTypeImageManifest, manifest.MediaType) + } + + m.Manifest = manifest + + return nil +} + +// MarshalJSON returns the contents of canonical. If canonical is empty, +// marshals the inner contents. +func (m *DeserializedManifest) MarshalJSON() ([]byte, error) { + if len(m.canonical) > 0 { + return m.canonical, nil + } + + return nil, errors.New("JSON representation not initialized in DeserializedManifest") +} + +// Payload returns the raw content of the manifest. The contents can be used to +// calculate the content identifier. 
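Round-tripping a manifest through FromStruct makes the canonical-bytes behavior above concrete. A minimal sketch; the config descriptor and annotation values are placeholders:

```go
package main

import (
	"fmt"

	"github.com/docker/distribution"
	"github.com/docker/distribution/manifest/ocischema"
	"github.com/opencontainers/go-digest"
	v1 "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	// Placeholder: the sha256 digest of empty input.
	empty := digest.Digest("sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")

	dm, err := ocischema.FromStruct(ocischema.Manifest{
		Versioned: ocischema.SchemaVersion,
		Config: distribution.Descriptor{
			MediaType: v1.MediaTypeImageConfig,
			Digest:    empty,
			Size:      2, // placeholder size
		},
		Annotations: map[string]string{"org.example.note": "placeholder"},
	})
	if err != nil {
		panic(err)
	}

	// Payload reports the OCI manifest media type over the canonical
	// JSON captured by FromStruct.
	mt, canonical, _ := dm.Payload()
	fmt.Println(mt, len(canonical))
}
```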
+func (m DeserializedManifest) Payload() (string, []byte, error) { + return v1.MediaTypeImageManifest, m.canonical, nil +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/distribution/manifest/schema1/config_builder.go b/src/cmd/linuxkit/vendor/github.com/docker/distribution/manifest/schema1/config_builder.go new file mode 100644 index 000000000..a96dc3d26 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/distribution/manifest/schema1/config_builder.go @@ -0,0 +1,287 @@ +package schema1 + +import ( + "context" + "crypto/sha512" + "encoding/json" + "errors" + "fmt" + "time" + + "github.com/docker/distribution" + "github.com/docker/distribution/manifest" + "github.com/docker/distribution/reference" + "github.com/docker/libtrust" + "github.com/opencontainers/go-digest" +) + +type diffID digest.Digest + +// gzippedEmptyTar is a gzip-compressed version of an empty tar file +// (1024 NULL bytes) +var gzippedEmptyTar = []byte{ + 31, 139, 8, 0, 0, 9, 110, 136, 0, 255, 98, 24, 5, 163, 96, 20, 140, 88, + 0, 8, 0, 0, 255, 255, 46, 175, 181, 239, 0, 4, 0, 0, +} + +// digestSHA256GzippedEmptyTar is the canonical sha256 digest of +// gzippedEmptyTar +const digestSHA256GzippedEmptyTar = digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4") + +// configManifestBuilder is a type for constructing manifests from an image +// configuration and generic descriptors. +type configManifestBuilder struct { + // bs is a BlobService used to create empty layer tars in the + // blob store if necessary. + bs distribution.BlobService + // pk is the libtrust private key used to sign the final manifest. + pk libtrust.PrivateKey + // configJSON is configuration supplied when the ManifestBuilder was + // created. + configJSON []byte + // ref contains the name and optional tag provided to NewConfigManifestBuilder. + ref reference.Named + // descriptors is the set of descriptors referencing the layers. + descriptors []distribution.Descriptor + // emptyTarDigest is set to a valid digest if an empty tar has been + // put in the blob store; otherwise it is empty. + emptyTarDigest digest.Digest +} + +// NewConfigManifestBuilder is used to build new manifests for the current +// schema version from an image configuration and a set of descriptors. +// It takes a BlobService so that it can add an empty tar to the blob store +// if the resulting manifest needs empty layers. 
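The two values above are easy to sanity-check against each other; a throwaway test like this sketch (not part of the patch, using only the vendored go-digest API) confirms the embedded bytes really hash to the published empty-layer digest. The builder itself continues below.

package schema1

import (
	"testing"

	"github.com/opencontainers/go-digest"
)

// TestEmptyTarDigest checks that the embedded gzip bytes hash to the
// canonical empty-layer digest declared above.
func TestEmptyTarDigest(t *testing.T) {
	if got := digest.FromBytes(gzippedEmptyTar); got != digestSHA256GzippedEmptyTar {
		t.Fatalf("gzippedEmptyTar hashes to %s", got)
	}
}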
+func NewConfigManifestBuilder(bs distribution.BlobService, pk libtrust.PrivateKey, ref reference.Named, configJSON []byte) distribution.ManifestBuilder { + return &configManifestBuilder{ + bs: bs, + pk: pk, + configJSON: configJSON, + ref: ref, + } +} + +// Build produces a final manifest from the given references +func (mb *configManifestBuilder) Build(ctx context.Context) (m distribution.Manifest, err error) { + type imageRootFS struct { + Type string `json:"type"` + DiffIDs []diffID `json:"diff_ids,omitempty"` + BaseLayer string `json:"base_layer,omitempty"` + } + + type imageHistory struct { + Created time.Time `json:"created"` + Author string `json:"author,omitempty"` + CreatedBy string `json:"created_by,omitempty"` + Comment string `json:"comment,omitempty"` + EmptyLayer bool `json:"empty_layer,omitempty"` + } + + type imageConfig struct { + RootFS *imageRootFS `json:"rootfs,omitempty"` + History []imageHistory `json:"history,omitempty"` + Architecture string `json:"architecture,omitempty"` + } + + var img imageConfig + + if err := json.Unmarshal(mb.configJSON, &img); err != nil { + return nil, err + } + + if len(img.History) == 0 { + return nil, errors.New("empty history when trying to create schema1 manifest") + } + + if len(img.RootFS.DiffIDs) != len(mb.descriptors) { + return nil, fmt.Errorf("number of descriptors and number of layers in rootfs must match: len(%v) != len(%v)", img.RootFS.DiffIDs, mb.descriptors) + } + + // Generate IDs for each layer + // For non-top-level layers, create fake V1Compatibility strings that + // fit the format and don't collide with anything else, but don't + // result in runnable images on their own. + type v1Compatibility struct { + ID string `json:"id"` + Parent string `json:"parent,omitempty"` + Comment string `json:"comment,omitempty"` + Created time.Time `json:"created"` + ContainerConfig struct { + Cmd []string + } `json:"container_config,omitempty"` + Author string `json:"author,omitempty"` + ThrowAway bool `json:"throwaway,omitempty"` + } + + fsLayerList := make([]FSLayer, len(img.History)) + history := make([]History, len(img.History)) + + parent := "" + layerCounter := 0 + for i, h := range img.History[:len(img.History)-1] { + var blobsum digest.Digest + if h.EmptyLayer { + if blobsum, err = mb.emptyTar(ctx); err != nil { + return nil, err + } + } else { + if len(img.RootFS.DiffIDs) <= layerCounter { + return nil, errors.New("too many non-empty layers in History section") + } + blobsum = mb.descriptors[layerCounter].Digest + layerCounter++ + } + + v1ID := digest.FromBytes([]byte(blobsum.Hex() + " " + parent)).Hex() + + if i == 0 && img.RootFS.BaseLayer != "" { + // windows-only baselayer setup + baseID := sha512.Sum384([]byte(img.RootFS.BaseLayer)) + parent = fmt.Sprintf("%x", baseID[:32]) + } + + v1Compatibility := v1Compatibility{ + ID: v1ID, + Parent: parent, + Comment: h.Comment, + Created: h.Created, + Author: h.Author, + } + v1Compatibility.ContainerConfig.Cmd = []string{img.History[i].CreatedBy} + if h.EmptyLayer { + v1Compatibility.ThrowAway = true + } + jsonBytes, err := json.Marshal(&v1Compatibility) + if err != nil { + return nil, err + } + + reversedIndex := len(img.History) - i - 1 + history[reversedIndex].V1Compatibility = string(jsonBytes) + fsLayerList[reversedIndex] = FSLayer{BlobSum: blobsum} + + parent = v1ID + } + + latestHistory := img.History[len(img.History)-1] + + var blobsum digest.Digest + if latestHistory.EmptyLayer { + if blobsum, err = mb.emptyTar(ctx); err != nil { + return nil, err + } + } else { + if 
len(img.RootFS.DiffIDs) <= layerCounter { + return nil, errors.New("too many non-empty layers in History section") + } + blobsum = mb.descriptors[layerCounter].Digest + } + + fsLayerList[0] = FSLayer{BlobSum: blobsum} + dgst := digest.FromBytes([]byte(blobsum.Hex() + " " + parent + " " + string(mb.configJSON))) + + // Top-level v1compatibility string should be a modified version of the + // image config. + transformedConfig, err := MakeV1ConfigFromConfig(mb.configJSON, dgst.Hex(), parent, latestHistory.EmptyLayer) + if err != nil { + return nil, err + } + + history[0].V1Compatibility = string(transformedConfig) + + tag := "" + if tagged, isTagged := mb.ref.(reference.Tagged); isTagged { + tag = tagged.Tag() + } + + mfst := Manifest{ + Versioned: manifest.Versioned{ + SchemaVersion: 1, + }, + Name: mb.ref.Name(), + Tag: tag, + Architecture: img.Architecture, + FSLayers: fsLayerList, + History: history, + } + + return Sign(&mfst, mb.pk) +} + +// emptyTar pushes a compressed empty tar to the blob store if one doesn't +// already exist, and returns its blobsum. +func (mb *configManifestBuilder) emptyTar(ctx context.Context) (digest.Digest, error) { + if mb.emptyTarDigest != "" { + // Already put an empty tar + return mb.emptyTarDigest, nil + } + + descriptor, err := mb.bs.Stat(ctx, digestSHA256GzippedEmptyTar) + switch err { + case nil: + mb.emptyTarDigest = descriptor.Digest + return descriptor.Digest, nil + case distribution.ErrBlobUnknown: + // nop + default: + return "", err + } + + // Add gzipped empty tar to the blob store + descriptor, err = mb.bs.Put(ctx, "", gzippedEmptyTar) + if err != nil { + return "", err + } + + mb.emptyTarDigest = descriptor.Digest + + return descriptor.Digest, nil +} + +// AppendReference adds a reference to the current ManifestBuilder +func (mb *configManifestBuilder) AppendReference(d distribution.Describable) error { + descriptor := d.Descriptor() + + if err := descriptor.Digest.Validate(); err != nil { + return err + } + + mb.descriptors = append(mb.descriptors, descriptor) + return nil +} + +// References returns the current references added to this builder +func (mb *configManifestBuilder) References() []distribution.Descriptor { + return mb.descriptors +} + +// MakeV1ConfigFromConfig creates an legacy V1 image config from image config JSON +func MakeV1ConfigFromConfig(configJSON []byte, v1ID, parentV1ID string, throwaway bool) ([]byte, error) { + // Top-level v1compatibility string should be a modified version of the + // image config. 
+ var configAsMap map[string]*json.RawMessage + if err := json.Unmarshal(configJSON, &configAsMap); err != nil { + return nil, err + } + + // Delete fields that didn't exist in old manifest + delete(configAsMap, "rootfs") + delete(configAsMap, "history") + configAsMap["id"] = rawJSON(v1ID) + if parentV1ID != "" { + configAsMap["parent"] = rawJSON(parentV1ID) + } + if throwaway { + configAsMap["throwaway"] = rawJSON(true) + } + + return json.Marshal(configAsMap) +} + +func rawJSON(value interface{}) *json.RawMessage { + jsonval, err := json.Marshal(value) + if err != nil { + return nil + } + return (*json.RawMessage)(&jsonval) +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/distribution/manifest/schema1/manifest.go b/src/cmd/linuxkit/vendor/github.com/docker/distribution/manifest/schema1/manifest.go new file mode 100644 index 000000000..9fef4dc7e --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/distribution/manifest/schema1/manifest.go @@ -0,0 +1,184 @@ +package schema1 + +import ( + "encoding/json" + "fmt" + + "github.com/docker/distribution" + "github.com/docker/distribution/manifest" + "github.com/docker/libtrust" + "github.com/opencontainers/go-digest" +) + +const ( + // MediaTypeManifest specifies the mediaType for the current version. Note + // that for schema version 1, the the media is optionally "application/json". + MediaTypeManifest = "application/vnd.docker.distribution.manifest.v1+json" + // MediaTypeSignedManifest specifies the mediatype for current SignedManifest version + MediaTypeSignedManifest = "application/vnd.docker.distribution.manifest.v1+prettyjws" + // MediaTypeManifestLayer specifies the media type for manifest layers + MediaTypeManifestLayer = "application/vnd.docker.container.image.rootfs.diff+x-gtar" +) + +var ( + // SchemaVersion provides a pre-initialized version structure for this + // packages version of the manifest. + SchemaVersion = manifest.Versioned{ + SchemaVersion: 1, + } +) + +func init() { + schema1Func := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) { + sm := new(SignedManifest) + err := sm.UnmarshalJSON(b) + if err != nil { + return nil, distribution.Descriptor{}, err + } + + desc := distribution.Descriptor{ + Digest: digest.FromBytes(sm.Canonical), + Size: int64(len(sm.Canonical)), + MediaType: MediaTypeSignedManifest, + } + return sm, desc, err + } + err := distribution.RegisterManifestSchema(MediaTypeSignedManifest, schema1Func) + if err != nil { + panic(fmt.Sprintf("Unable to register manifest: %s", err)) + } + err = distribution.RegisterManifestSchema("", schema1Func) + if err != nil { + panic(fmt.Sprintf("Unable to register manifest: %s", err)) + } + err = distribution.RegisterManifestSchema("application/json", schema1Func) + if err != nil { + panic(fmt.Sprintf("Unable to register manifest: %s", err)) + } +} + +// FSLayer is a container struct for BlobSums defined in an image manifest +type FSLayer struct { + // BlobSum is the tarsum of the referenced filesystem image layer + BlobSum digest.Digest `json:"blobSum"` +} + +// History stores unstructured v1 compatibility information +type History struct { + // V1Compatibility is the raw v1 compatibility information + V1Compatibility string `json:"v1Compatibility"` +} + +// Manifest provides the base accessible fields for working with V2 image +// format in the registry. 
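Because the init function above registers the same unmarshaler under MediaTypeSignedManifest, "application/json", and the empty string, a caller never needs to know the schema in advance. A sketch of the generic decode path follows; decodeManifest is a hypothetical helper and resp is an assumed *http.Response from a manifest GET. The Manifest type it ultimately decodes into is defined just below.

package example // sketch only

import (
	"io/ioutil"
	"net/http"

	"github.com/docker/distribution"
)

// decodeManifest relies on the registrations above: the Content-Type
// header selects the unmarshaler, so signed schema1, plain JSON and
// missing content types all decode through one call.
func decodeManifest(resp *http.Response) (distribution.Manifest, distribution.Descriptor, error) {
	payload, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, distribution.Descriptor{}, err
	}
	return distribution.UnmarshalManifest(resp.Header.Get("Content-Type"), payload)
}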
+type Manifest struct { + manifest.Versioned + + // Name is the name of the image's repository + Name string `json:"name"` + + // Tag is the tag of the image specified by this manifest + Tag string `json:"tag"` + + // Architecture is the host architecture on which this image is intended to + // run + Architecture string `json:"architecture"` + + // FSLayers is a list of filesystem layer blobSums contained in this image + FSLayers []FSLayer `json:"fsLayers"` + + // History is a list of unstructured historical data for v1 compatibility + History []History `json:"history"` +} + +// SignedManifest provides an envelope for a signed image manifest, including +// the format sensitive raw bytes. +type SignedManifest struct { + Manifest + + // Canonical is the canonical byte representation of the ImageManifest, + // without any attached signatures. The manifest byte + // representation cannot change or it will have to be re-signed. + Canonical []byte `json:"-"` + + // all contains the byte representation of the Manifest including signatures + // and is returned by Payload() + all []byte +} + +// UnmarshalJSON populates a new SignedManifest struct from JSON data. +func (sm *SignedManifest) UnmarshalJSON(b []byte) error { + sm.all = make([]byte, len(b)) + // store manifest and signatures in all + copy(sm.all, b) + + jsig, err := libtrust.ParsePrettySignature(b, "signatures") + if err != nil { + return err + } + + // Resolve the payload in the manifest. + bytes, err := jsig.Payload() + if err != nil { + return err + } + + // sm.Canonical stores the canonical manifest JSON + sm.Canonical = make([]byte, len(bytes)) + copy(sm.Canonical, bytes) + + // Unmarshal canonical JSON into Manifest object + var manifest Manifest + if err := json.Unmarshal(sm.Canonical, &manifest); err != nil { + return err + } + + sm.Manifest = manifest + + return nil +} + +// References returns the descriptors of this manifests references +func (sm SignedManifest) References() []distribution.Descriptor { + dependencies := make([]distribution.Descriptor, len(sm.FSLayers)) + for i, fsLayer := range sm.FSLayers { + dependencies[i] = distribution.Descriptor{ + MediaType: "application/vnd.docker.container.image.rootfs.diff+x-gtar", + Digest: fsLayer.BlobSum, + } + } + + return dependencies + +} + +// MarshalJSON returns the contents of raw. If Raw is nil, marshals the inner +// contents. Applications requiring a marshaled signed manifest should simply +// use Raw directly, since the the content produced by json.Marshal will be +// compacted and will fail signature checks. +func (sm *SignedManifest) MarshalJSON() ([]byte, error) { + if len(sm.all) > 0 { + return sm.all, nil + } + + // If the raw data is not available, just dump the inner content. + return json.Marshal(&sm.Manifest) +} + +// Payload returns the signed content of the signed manifest. +func (sm SignedManifest) Payload() (string, []byte, error) { + return MediaTypeSignedManifest, sm.all, nil +} + +// Signatures returns the signatures as provided by +// (*libtrust.JSONSignature).Signatures. The byte slices are opaque jws +// signatures. +func (sm *SignedManifest) Signatures() ([][]byte, error) { + jsig, err := libtrust.ParsePrettySignature(sm.all, "signatures") + if err != nil { + return nil, err + } + + // Resolve the payload in the manifest. 
+ return jsig.Signatures() +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/distribution/manifest/schema1/reference_builder.go b/src/cmd/linuxkit/vendor/github.com/docker/distribution/manifest/schema1/reference_builder.go new file mode 100644 index 000000000..0f1d386aa --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/distribution/manifest/schema1/reference_builder.go @@ -0,0 +1,98 @@ +package schema1 + +import ( + "context" + "errors" + "fmt" + + "github.com/docker/distribution" + "github.com/docker/distribution/manifest" + "github.com/docker/distribution/reference" + "github.com/docker/libtrust" + "github.com/opencontainers/go-digest" +) + +// referenceManifestBuilder is a type for constructing manifests from schema1 +// dependencies. +type referenceManifestBuilder struct { + Manifest + pk libtrust.PrivateKey +} + +// NewReferenceManifestBuilder is used to build new manifests for the current +// schema version using schema1 dependencies. +func NewReferenceManifestBuilder(pk libtrust.PrivateKey, ref reference.Named, architecture string) distribution.ManifestBuilder { + tag := "" + if tagged, isTagged := ref.(reference.Tagged); isTagged { + tag = tagged.Tag() + } + + return &referenceManifestBuilder{ + Manifest: Manifest{ + Versioned: manifest.Versioned{ + SchemaVersion: 1, + }, + Name: ref.Name(), + Tag: tag, + Architecture: architecture, + }, + pk: pk, + } +} + +func (mb *referenceManifestBuilder) Build(ctx context.Context) (distribution.Manifest, error) { + m := mb.Manifest + if len(m.FSLayers) == 0 { + return nil, errors.New("cannot build manifest with zero layers or history") + } + + m.FSLayers = make([]FSLayer, len(mb.Manifest.FSLayers)) + m.History = make([]History, len(mb.Manifest.History)) + copy(m.FSLayers, mb.Manifest.FSLayers) + copy(m.History, mb.Manifest.History) + + return Sign(&m, mb.pk) +} + +// AppendReference adds a reference to the current ManifestBuilder +func (mb *referenceManifestBuilder) AppendReference(d distribution.Describable) error { + r, ok := d.(Reference) + if !ok { + return fmt.Errorf("unable to add non-reference type to v1 builder") + } + + // Entries need to be prepended + mb.Manifest.FSLayers = append([]FSLayer{{BlobSum: r.Digest}}, mb.Manifest.FSLayers...) + mb.Manifest.History = append([]History{r.History}, mb.Manifest.History...) + return nil + +} + +// References returns the current references added to this builder +func (mb *referenceManifestBuilder) References() []distribution.Descriptor { + refs := make([]distribution.Descriptor, len(mb.Manifest.FSLayers)) + for i := range mb.Manifest.FSLayers { + layerDigest := mb.Manifest.FSLayers[i].BlobSum + history := mb.Manifest.History[i] + ref := Reference{layerDigest, 0, history} + refs[i] = ref.Descriptor() + } + return refs +} + +// Reference describes a manifest v2, schema version 1 dependency. +// An FSLayer associated with a history entry. +type Reference struct { + Digest digest.Digest + Size int64 // if we know it, set it for the descriptor. 
+ History History +} + +// Descriptor describes a reference +func (r Reference) Descriptor() distribution.Descriptor { + return distribution.Descriptor{ + MediaType: MediaTypeManifestLayer, + Digest: r.Digest, + Size: r.Size, + } +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/distribution/manifest/schema1/sign.go b/src/cmd/linuxkit/vendor/github.com/docker/distribution/manifest/schema1/sign.go new file mode 100644 index 000000000..c862dd812 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/distribution/manifest/schema1/sign.go @@ -0,0 +1,68 @@ +package schema1 + +import ( + "crypto/x509" + "encoding/json" + + "github.com/docker/libtrust" +) + +// Sign signs the manifest with the provided private key, returning a +// SignedManifest. This typically won't be used within the registry, except +// for testing. +func Sign(m *Manifest, pk libtrust.PrivateKey) (*SignedManifest, error) { + p, err := json.MarshalIndent(m, "", " ") + if err != nil { + return nil, err + } + + js, err := libtrust.NewJSONSignature(p) + if err != nil { + return nil, err + } + + if err := js.Sign(pk); err != nil { + return nil, err + } + + pretty, err := js.PrettySignature("signatures") + if err != nil { + return nil, err + } + + return &SignedManifest{ + Manifest: *m, + all: pretty, + Canonical: p, + }, nil +} + +// SignWithChain signs the manifest with the given private key and x509 chain. +// The public key of the first element in the chain must be the public key +// corresponding with the sign key. +func SignWithChain(m *Manifest, key libtrust.PrivateKey, chain []*x509.Certificate) (*SignedManifest, error) { + p, err := json.MarshalIndent(m, "", " ") + if err != nil { + return nil, err + } + + js, err := libtrust.NewJSONSignature(p) + if err != nil { + return nil, err + } + + if err := js.SignWithChain(key, chain); err != nil { + return nil, err + } + + pretty, err := js.PrettySignature("signatures") + if err != nil { + return nil, err + } + + return &SignedManifest{ + Manifest: *m, + all: pretty, + Canonical: p, + }, nil +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/distribution/manifest/schema1/verify.go b/src/cmd/linuxkit/vendor/github.com/docker/distribution/manifest/schema1/verify.go new file mode 100644 index 000000000..ef59065cd --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/distribution/manifest/schema1/verify.go @@ -0,0 +1,32 @@ +package schema1 + +import ( + "crypto/x509" + + "github.com/docker/libtrust" + "github.com/sirupsen/logrus" +) + +// Verify verifies the signature of the signed manifest returning the public +// keys used during signing. +func Verify(sm *SignedManifest) ([]libtrust.PublicKey, error) { + js, err := libtrust.ParsePrettySignature(sm.all, "signatures") + if err != nil { + logrus.WithField("err", err).Debugf("(*SignedManifest).Verify") + return nil, err + } + + return js.Verify() +} + +// VerifyChains verifies the signature of the signed manifest against the +// certificate pool returning the list of verified chains. Signatures without +// an x509 chain are not checked. 
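Sign and SignWithChain above pair with Verify and VerifyChains in verify.go, which continues below. A round-trip sketch, not part of the patch: mfst is an assumed, already-populated schema1.Manifest, signAndVerify is a hypothetical helper, and the key comes from libtrust's generator.

package example // sketch only

import (
	"github.com/docker/distribution/manifest/schema1"
	"github.com/docker/libtrust"
)

// signAndVerify round-trips a manifest through Sign and Verify with a
// freshly generated throwaway key.
func signAndVerify(mfst *schema1.Manifest) error {
	pk, err := libtrust.GenerateECP256PrivateKey()
	if err != nil {
		return err
	}
	sm, err := schema1.Sign(mfst, pk)
	if err != nil {
		return err
	}
	_, err = schema1.Verify(sm) // returns the signing public keys
	return err
}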
+func VerifyChains(sm *SignedManifest, ca *x509.CertPool) ([][]*x509.Certificate, error) { + js, err := libtrust.ParsePrettySignature(sm.all, "signatures") + if err != nil { + return nil, err + } + + return js.VerifyChains(ca) +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/distribution/manifest/schema2/manifest.go b/src/cmd/linuxkit/vendor/github.com/docker/distribution/manifest/schema2/manifest.go index a2708c750..41f480292 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/distribution/manifest/schema2/manifest.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/distribution/manifest/schema2/manifest.go @@ -71,7 +71,7 @@ type Manifest struct { Layers []distribution.Descriptor `json:"layers"` } -// References returnes the descriptors of this manifests references. +// References returns the descriptors of this manifests references. func (m Manifest) References() []distribution.Descriptor { references := make([]distribution.Descriptor, 0, 1+len(m.Layers)) references = append(references, m.Config) @@ -79,7 +79,7 @@ func (m Manifest) References() []distribution.Descriptor { return references } -// Target returns the target of this signed manifest. +// Target returns the target of this manifest. func (m Manifest) Target() distribution.Descriptor { return m.Config } @@ -106,7 +106,7 @@ func FromStruct(m Manifest) (*DeserializedManifest, error) { // UnmarshalJSON populates a new Manifest struct from JSON data. func (m *DeserializedManifest) UnmarshalJSON(b []byte) error { - m.canonical = make([]byte, len(b), len(b)) + m.canonical = make([]byte, len(b)) // store manifest in canonical copy(m.canonical, b) @@ -116,6 +116,12 @@ func (m *DeserializedManifest) UnmarshalJSON(b []byte) error { return err } + if manifest.MediaType != MediaTypeManifest { + return fmt.Errorf("mediaType in manifest should be '%s' not '%s'", + MediaTypeManifest, manifest.MediaType) + + } + m.Manifest = manifest return nil diff --git a/src/cmd/linuxkit/vendor/github.com/docker/distribution/manifests.go b/src/cmd/linuxkit/vendor/github.com/docker/distribution/manifests.go index 1816baea1..8f84a220a 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/distribution/manifests.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/distribution/manifests.go @@ -87,7 +87,7 @@ func ManifestMediaTypes() (mediaTypes []string) { // UnmarshalFunc implements manifest unmarshalling a given MediaType type UnmarshalFunc func([]byte) (Manifest, Descriptor, error) -var mappings = make(map[string]UnmarshalFunc, 0) +var mappings = make(map[string]UnmarshalFunc) // UnmarshalManifest looks up manifest unmarshal functions based on // MediaType diff --git a/src/cmd/linuxkit/vendor/github.com/docker/distribution/metrics/prometheus.go b/src/cmd/linuxkit/vendor/github.com/docker/distribution/metrics/prometheus.go index b5a532144..91b32b23d 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/distribution/metrics/prometheus.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/distribution/metrics/prometheus.go @@ -10,4 +10,7 @@ const ( var ( // StorageNamespace is the prometheus namespace of blob/cache related operations StorageNamespace = metrics.NewNamespace(NamespacePrefix, "storage", nil) + + // NotificationsNamespace is the prometheus namespace of notification related metrics + NotificationsNamespace = metrics.NewNamespace(NamespacePrefix, "notifications", nil) ) diff --git a/src/cmd/linuxkit/vendor/github.com/docker/distribution/reference/normalize.go b/src/cmd/linuxkit/vendor/github.com/docker/distribution/reference/normalize.go index 
2d71fc5e9..b3dfb7a6d 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/distribution/reference/normalize.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/distribution/reference/normalize.go @@ -56,6 +56,35 @@ func ParseNormalizedNamed(s string) (Named, error) { return named, nil } +// ParseDockerRef normalizes the image reference following the docker convention. This is added +// mainly for backward compatibility. +// The reference returned can only be either tagged or digested. For reference contains both tag +// and digest, the function returns digested reference, e.g. docker.io/library/busybox:latest@ +// sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa will be returned as +// docker.io/library/busybox@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa. +func ParseDockerRef(ref string) (Named, error) { + named, err := ParseNormalizedNamed(ref) + if err != nil { + return nil, err + } + if _, ok := named.(NamedTagged); ok { + if canonical, ok := named.(Canonical); ok { + // The reference is both tagged and digested, only + // return digested. + newNamed, err := WithName(canonical.Name()) + if err != nil { + return nil, err + } + newCanonical, err := WithDigest(newNamed, canonical.Digest()) + if err != nil { + return nil, err + } + return newCanonical, nil + } + } + return TagNameOnly(named), nil +} + // splitDockerDomain splits a repository name to domain and remotename string. // If no valid domain is found, the default domain is used. Repository name // needs to be already validated before. diff --git a/src/cmd/linuxkit/vendor/github.com/docker/distribution/reference/reference.go b/src/cmd/linuxkit/vendor/github.com/docker/distribution/reference/reference.go index 2f66cca87..8c0c23b2f 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/distribution/reference/reference.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/distribution/reference/reference.go @@ -205,7 +205,7 @@ func Parse(s string) (Reference, error) { var repo repository nameMatch := anchoredNameRegexp.FindStringSubmatch(matches[1]) - if nameMatch != nil && len(nameMatch) == 3 { + if len(nameMatch) == 3 { repo.domain = nameMatch[1] repo.path = nameMatch[2] } else { diff --git a/src/cmd/linuxkit/vendor/github.com/docker/distribution/registry.go b/src/cmd/linuxkit/vendor/github.com/docker/distribution/registry.go index a3a80ab88..6c3210989 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/distribution/registry.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/distribution/registry.go @@ -54,6 +54,11 @@ type RepositoryEnumerator interface { Enumerate(ctx context.Context, ingester func(string) error) error } +// RepositoryRemover removes given repository +type RepositoryRemover interface { + Remove(ctx context.Context, name reference.Named) error +} + // ManifestServiceOption is a function argument for Manifest Service methods type ManifestServiceOption interface { Apply(ManifestService) error diff --git a/src/cmd/linuxkit/vendor/github.com/docker/distribution/registry/api/errcode/errors.go b/src/cmd/linuxkit/vendor/github.com/docker/distribution/registry/api/errcode/errors.go index 6d9bb4b62..4c35b879a 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/distribution/registry/api/errcode/errors.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/distribution/registry/api/errcode/errors.go @@ -207,11 +207,11 @@ func (errs Errors) MarshalJSON() ([]byte, error) { for _, daErr := range errs { var err Error - switch daErr.(type) { + switch daErr := daErr.(type) { case 
ErrorCode: - err = daErr.(ErrorCode).WithDetail(nil) + err = daErr.WithDetail(nil) case Error: - err = daErr.(Error) + err = daErr default: err = ErrorCodeUnknown.WithDetail(daErr) diff --git a/src/cmd/linuxkit/vendor/github.com/docker/distribution/registry/api/errcode/handler.go b/src/cmd/linuxkit/vendor/github.com/docker/distribution/registry/api/errcode/handler.go index d77e70473..ebb9ce927 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/distribution/registry/api/errcode/handler.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/distribution/registry/api/errcode/handler.go @@ -9,7 +9,7 @@ import ( // and sets the content-type header to 'application/json'. It will handle // ErrorCoder and Errors, and if necessary will create an envelope. func ServeJSON(w http.ResponseWriter, err error) error { - w.Header().Set("Content-Type", "application/json; charset=utf-8") + w.Header().Set("Content-Type", "application/json") var sc int switch errs := err.(type) { diff --git a/src/cmd/linuxkit/vendor/github.com/docker/distribution/registry/api/v2/descriptors.go b/src/cmd/linuxkit/vendor/github.com/docker/distribution/registry/api/v2/descriptors.go index a9616c58a..cffacc3c0 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/distribution/registry/api/v2/descriptors.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/distribution/registry/api/v2/descriptors.go @@ -126,7 +126,7 @@ var ( }, }, Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", + ContentType: "application/json", Format: errorsBody, }, ErrorCodes: []errcode.ErrorCode{ @@ -147,7 +147,7 @@ var ( }, }, Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", + ContentType: "application/json", Format: errorsBody, }, ErrorCodes: []errcode.ErrorCode{ @@ -168,7 +168,7 @@ var ( }, }, Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", + ContentType: "application/json", Format: errorsBody, }, ErrorCodes: []errcode.ErrorCode{ @@ -189,7 +189,7 @@ var ( }, }, Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", + ContentType: "application/json", Format: errorsBody, }, ErrorCodes: []errcode.ErrorCode{ @@ -441,7 +441,7 @@ var routeDescriptors = []RouteDescriptor{ }, }, Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", + ContentType: "application/json", Format: `{ "name": <name>, "tags": [ @@ -478,7 +478,7 @@ var routeDescriptors = []RouteDescriptor{ linkHeader, }, Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", + ContentType: "application/json", Format: `{ "name": <name>, "tags": [ @@ -541,7 +541,7 @@ var routeDescriptors = []RouteDescriptor{ ErrorCodeTagInvalid, }, Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", + ContentType: "application/json", Format: errorsBody, }, }, @@ -592,7 +592,7 @@ var routeDescriptors = []RouteDescriptor{ Description: "The received manifest was invalid in some way, as described by the error codes.
The client should resolve the issue and retry the request.", StatusCode: http.StatusBadRequest, Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", + ContentType: "application/json", Format: errorsBody, }, ErrorCodes: []errcode.ErrorCode{ @@ -615,7 +615,7 @@ var routeDescriptors = []RouteDescriptor{ ErrorCodeBlobUnknown, }, Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", + ContentType: "application/json", Format: `{ "errors:" [{ "code": "BLOB_UNKNOWN", @@ -669,7 +669,7 @@ var routeDescriptors = []RouteDescriptor{ ErrorCodeTagInvalid, }, Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", + ContentType: "application/json", Format: errorsBody, }, }, @@ -686,7 +686,7 @@ var routeDescriptors = []RouteDescriptor{ ErrorCodeManifestUnknown, }, Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", + ContentType: "application/json", Format: errorsBody, }, }, @@ -766,7 +766,7 @@ var routeDescriptors = []RouteDescriptor{ ErrorCodeDigestInvalid, }, Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", + ContentType: "application/json", Format: errorsBody, }, }, @@ -774,7 +774,7 @@ var routeDescriptors = []RouteDescriptor{ Description: "The blob, identified by `name` and `digest`, is unknown to the registry.", StatusCode: http.StatusNotFound, Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", + ContentType: "application/json", Format: errorsBody, }, ErrorCodes: []errcode.ErrorCode{ @@ -838,7 +838,7 @@ var routeDescriptors = []RouteDescriptor{ ErrorCodeDigestInvalid, }, Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", + ContentType: "application/json", Format: errorsBody, }, }, @@ -849,7 +849,7 @@ var routeDescriptors = []RouteDescriptor{ ErrorCodeBlobUnknown, }, Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", + ContentType: "application/json", Format: errorsBody, }, }, @@ -905,7 +905,7 @@ var routeDescriptors = []RouteDescriptor{ Description: "The blob, identified by `name` and `digest`, is unknown to the registry.", StatusCode: http.StatusNotFound, Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", + ContentType: "application/json", Format: errorsBody, }, ErrorCodes: []errcode.ErrorCode{ @@ -917,7 +917,7 @@ var routeDescriptors = []RouteDescriptor{ Description: "Blob delete is not allowed because the registry is configured as a pull-through cache or `delete` has been disabled", StatusCode: http.StatusMethodNotAllowed, Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", + ContentType: "application/json", Format: errorsBody, }, ErrorCodes: []errcode.ErrorCode{ @@ -1179,7 +1179,7 @@ var routeDescriptors = []RouteDescriptor{ ErrorCodeBlobUploadInvalid, }, Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", + ContentType: "application/json", Format: errorsBody, }, }, @@ -1190,7 +1190,7 @@ var routeDescriptors = []RouteDescriptor{ ErrorCodeBlobUploadUnknown, }, Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", + ContentType: "application/json", Format: errorsBody, }, }, @@ -1254,7 +1254,7 @@ var routeDescriptors = []RouteDescriptor{ ErrorCodeBlobUploadInvalid, }, Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", + ContentType: "application/json", Format: errorsBody, }, }, @@ -1265,7 +1265,7 @@ var routeDescriptors = []RouteDescriptor{ ErrorCodeBlobUploadUnknown, }, Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", + 
ContentType: "application/json", Format: errorsBody, }, }, @@ -1336,7 +1336,7 @@ var routeDescriptors = []RouteDescriptor{ ErrorCodeBlobUploadInvalid, }, Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", + ContentType: "application/json", Format: errorsBody, }, }, @@ -1347,7 +1347,7 @@ var routeDescriptors = []RouteDescriptor{ ErrorCodeBlobUploadUnknown, }, Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", + ContentType: "application/json", Format: errorsBody, }, }, @@ -1431,7 +1431,7 @@ var routeDescriptors = []RouteDescriptor{ errcode.ErrorCodeUnsupported, }, Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", + ContentType: "application/json", Format: errorsBody, }, }, @@ -1442,7 +1442,7 @@ var routeDescriptors = []RouteDescriptor{ ErrorCodeBlobUploadUnknown, }, Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", + ContentType: "application/json", Format: errorsBody, }, }, @@ -1488,7 +1488,7 @@ var routeDescriptors = []RouteDescriptor{ ErrorCodeBlobUploadInvalid, }, Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", + ContentType: "application/json", Format: errorsBody, }, }, @@ -1499,7 +1499,7 @@ var routeDescriptors = []RouteDescriptor{ ErrorCodeBlobUploadUnknown, }, Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", + ContentType: "application/json", Format: errorsBody, }, }, @@ -1539,7 +1539,7 @@ var routeDescriptors = []RouteDescriptor{ }, }, Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", + ContentType: "application/json", Format: `{ "repositories": [ , @@ -1558,7 +1558,7 @@ var routeDescriptors = []RouteDescriptor{ { StatusCode: http.StatusOK, Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", + ContentType: "application/json", Format: `{ "repositories": [ , diff --git a/src/cmd/linuxkit/vendor/github.com/docker/distribution/registry/api/v2/routes.go b/src/cmd/linuxkit/vendor/github.com/docker/distribution/registry/api/v2/routes.go index 5b80d5be7..9612ac2e5 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/distribution/registry/api/v2/routes.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/distribution/registry/api/v2/routes.go @@ -14,15 +14,6 @@ const ( RouteNameCatalog = "catalog" ) -var allEndpoints = []string{ - RouteNameManifest, - RouteNameCatalog, - RouteNameTags, - RouteNameBlob, - RouteNameBlobUpload, - RouteNameBlobUploadChunk, -} - // Router builds a gorilla router with named routes for the various API // methods. This can be used directly by both server implementations and // clients. diff --git a/src/cmd/linuxkit/vendor/github.com/docker/distribution/registry/api/v2/urls.go b/src/cmd/linuxkit/vendor/github.com/docker/distribution/registry/api/v2/urls.go index 1337bdb12..3c3ec9893 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/distribution/registry/api/v2/urls.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/distribution/registry/api/v2/urls.go @@ -252,15 +252,3 @@ func appendValuesURL(u *url.URL, values ...url.Values) *url.URL { u.RawQuery = merged.Encode() return u } - -// appendValues appends the parameters to the url. Panics if the string is not -// a url. 
-func appendValues(u string, values ...url.Values) string { - up, err := url.Parse(u) - - if err != nil { - panic(err) // should never happen - } - - return appendValuesURL(up, values...).String() -} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go b/src/cmd/linuxkit/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go index 6e3f1ccc4..fe238210c 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go @@ -117,8 +117,8 @@ func init() { var t octetType isCtl := c <= 31 || c == 127 isChar := 0 <= c && c <= 127 - isSeparator := strings.IndexRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) >= 0 - if strings.IndexRune(" \t\r\n", rune(c)) >= 0 { + isSeparator := strings.ContainsRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) + if strings.ContainsRune(" \t\r\n", rune(c)) { t |= isSpace } if isChar && !isCtl && !isSeparator { diff --git a/src/cmd/linuxkit/vendor/github.com/docker/distribution/registry/client/auth/session.go b/src/cmd/linuxkit/vendor/github.com/docker/distribution/registry/client/auth/session.go index db86c9b06..5d2322f35 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/distribution/registry/client/auth/session.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/distribution/registry/client/auth/session.go @@ -68,7 +68,6 @@ func NewAuthorizer(manager challenge.Manager, handlers ...AuthenticationHandler) type endpointAuthorizer struct { challenges challenge.Manager handlers []AuthenticationHandler - transport http.RoundTripper } func (ea *endpointAuthorizer) ModifyRequest(req *http.Request) error { @@ -121,7 +120,6 @@ type clock interface { } type tokenHandler struct { - header http.Header creds CredentialStore transport http.RoundTripper clock clock @@ -368,6 +366,10 @@ func (th *tokenHandler) fetchTokenWithOAuth(realm *url.URL, refreshToken, servic return "", time.Time{}, fmt.Errorf("unable to decode token response: %s", err) } + if tr.AccessToken == "" { + return "", time.Time{}, ErrNoToken + } + if tr.RefreshToken != "" && tr.RefreshToken != refreshToken { th.creds.SetRefreshToken(realm, service, tr.RefreshToken) } diff --git a/src/cmd/linuxkit/vendor/github.com/docker/distribution/registry/client/blob_writer.go b/src/cmd/linuxkit/vendor/github.com/docker/distribution/registry/client/blob_writer.go index 695bf852f..cc6e88ca2 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/distribution/registry/client/blob_writer.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/distribution/registry/client/blob_writer.go @@ -64,8 +64,8 @@ func (hbu *httpBlobUpload) ReadFrom(r io.Reader) (n int64, err error) { return 0, fmt.Errorf("bad range format: %s", rng) } + hbu.offset += end - start + 1 return (end - start + 1), nil - } func (hbu *httpBlobUpload) Write(p []byte) (n int, err error) { @@ -99,8 +99,8 @@ func (hbu *httpBlobUpload) Write(p []byte) (n int, err error) { return 0, fmt.Errorf("bad range format: %s", rng) } + hbu.offset += int64(end - start + 1) return (end - start + 1), nil - } func (hbu *httpBlobUpload) Size() int64 { diff --git a/src/cmd/linuxkit/vendor/github.com/docker/distribution/registry/client/repository.go b/src/cmd/linuxkit/vendor/github.com/docker/distribution/registry/client/repository.go index d8e2c795d..793a662d2 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/distribution/registry/client/repository.go 
+++ b/src/cmd/linuxkit/vendor/github.com/docker/distribution/registry/client/repository.go @@ -16,7 +16,7 @@ import ( "github.com/docker/distribution" "github.com/docker/distribution/reference" - "github.com/docker/distribution/registry/api/v2" + v2 "github.com/docker/distribution/registry/api/v2" "github.com/docker/distribution/registry/client/transport" "github.com/docker/distribution/registry/storage/cache" "github.com/docker/distribution/registry/storage/cache/memory" @@ -81,9 +81,8 @@ func NewRegistry(baseURL string, transport http.RoundTripper) (Registry, error) } type registry struct { - client *http.Client - ub *v2.URLBuilder - context context.Context + client *http.Client + ub *v2.URLBuilder } // Repositories returns a lexigraphically sorted catalog given a base URL. The 'entries' slice will be filled up to the size @@ -152,10 +151,9 @@ func NewRepository(name reference.Named, baseURL string, transport http.RoundTri } type repository struct { - client *http.Client - ub *v2.URLBuilder - context context.Context - name reference.Named + client *http.Client + ub *v2.URLBuilder + name reference.Named } func (r *repository) Named() reference.Named { @@ -669,7 +667,28 @@ func (bs *blobs) Open(ctx context.Context, dgst digest.Digest) (distribution.Rea } func (bs *blobs) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error { - panic("not implemented") + desc, err := bs.statter.Stat(ctx, dgst) + if err != nil { + return err + } + + w.Header().Set("Content-Length", strconv.FormatInt(desc.Size, 10)) + w.Header().Set("Content-Type", desc.MediaType) + w.Header().Set("Docker-Content-Digest", dgst.String()) + w.Header().Set("Etag", dgst.String()) + + if r.Method == http.MethodHead { + return nil + } + + blob, err := bs.Open(ctx, dgst) + if err != nil { + return err + } + defer blob.Close() + + _, err = io.CopyN(w, blob, desc.Size) + return err } func (bs *blobs) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { @@ -754,6 +773,14 @@ func (bs *blobs) Create(ctx context.Context, options ...distribution.BlobCreateO case http.StatusAccepted: // TODO(dmcgowan): Check for invalid UUID uuid := resp.Header.Get("Docker-Upload-UUID") + if uuid == "" { + parts := strings.Split(resp.Header.Get("Location"), "/") + uuid = parts[len(parts)-1] + } + if uuid == "" { + return nil, errors.New("cannot retrieve docker upload UUID") + } + location, err := sanitizeLocation(resp.Header.Get("Location"), u) if err != nil { return nil, err @@ -772,7 +799,18 @@ func (bs *blobs) Create(ctx context.Context, options ...distribution.BlobCreateO } func (bs *blobs) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { - panic("not implemented") + location, err := bs.ub.BuildBlobUploadChunkURL(bs.name, id) + if err != nil { + return nil, err + } + + return &httpBlobUpload{ + statter: bs.statter, + client: bs.client, + uuid: id, + startedAt: time.Now(), + location: location, + }, nil } func (bs *blobs) Delete(ctx context.Context, dgst digest.Digest) error { diff --git a/src/cmd/linuxkit/vendor/github.com/docker/distribution/registry/client/transport/http_reader.go b/src/cmd/linuxkit/vendor/github.com/docker/distribution/registry/client/transport/http_reader.go index e5ff09d75..1d0b382fb 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/distribution/registry/client/transport/http_reader.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/distribution/registry/client/transport/http_reader.go @@ -5,7 +5,6 @@ import ( "fmt" "io" 
"net/http" - "os" "regexp" "strconv" ) @@ -97,7 +96,7 @@ func (hrs *httpReadSeeker) Seek(offset int64, whence int) (int64, error) { lastReaderOffset := hrs.readerOffset - if whence == os.SEEK_SET && hrs.rc == nil { + if whence == io.SeekStart && hrs.rc == nil { // If no request has been made yet, and we are seeking to an // absolute position, set the read offset as well to avoid an // unnecessary request. @@ -113,14 +112,14 @@ func (hrs *httpReadSeeker) Seek(offset int64, whence int) (int64, error) { newOffset := hrs.seekOffset switch whence { - case os.SEEK_CUR: + case io.SeekCurrent: newOffset += offset - case os.SEEK_END: + case io.SeekEnd: if hrs.size < 0 { return 0, errors.New("content length not known") } newOffset = hrs.size + offset - case os.SEEK_SET: + case io.SeekStart: newOffset = offset } diff --git a/src/cmd/linuxkit/vendor/github.com/docker/distribution/registry/client/transport/transport.go b/src/cmd/linuxkit/vendor/github.com/docker/distribution/registry/client/transport/transport.go index 30e45fab0..ee23829f3 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/distribution/registry/client/transport/transport.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/distribution/registry/client/transport/transport.go @@ -6,6 +6,13 @@ import ( "sync" ) +func identityTransportWrapper(rt http.RoundTripper) http.RoundTripper { + return rt +} + +// DefaultTransportWrapper allows a user to wrap every generated transport +var DefaultTransportWrapper = identityTransportWrapper + // RequestModifier represents an object which will do an inplace // modification of an HTTP request. type RequestModifier interface { @@ -31,10 +38,11 @@ func (h headerModifier) ModifyRequest(req *http.Request) error { // NewTransport creates a new transport which will apply modifiers to // the request on a RoundTrip call. func NewTransport(base http.RoundTripper, modifiers ...RequestModifier) http.RoundTripper { - return &transport{ - Modifiers: modifiers, - Base: base, - } + return DefaultTransportWrapper( + &transport{ + Modifiers: modifiers, + Base: base, + }) } // transport is an http.RoundTripper that makes HTTP requests after diff --git a/src/cmd/linuxkit/vendor/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go b/src/cmd/linuxkit/vendor/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go index ac4c45211..f25d68d9f 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go @@ -4,39 +4,14 @@ import ( "context" "github.com/docker/distribution" + dcontext "github.com/docker/distribution/context" prometheus "github.com/docker/distribution/metrics" "github.com/opencontainers/go-digest" ) -// Metrics is used to hold metric counters -// related to the number of times a cache was -// hit or missed. -type Metrics struct { - Requests uint64 - Hits uint64 - Misses uint64 -} - -// Logger can be provided on the MetricsTracker to log errors. -// -// Usually, this is just a proxy to dcontext.GetLogger. -type Logger interface { - Errorf(format string, args ...interface{}) -} - -// MetricsTracker represents a metric tracker -// which simply counts the number of hits and misses. 
-type MetricsTracker interface { - Hit() - Miss() - Metrics() Metrics - Logger(context.Context) Logger -} - type cachedBlobStatter struct { cache distribution.BlobDescriptorService backend distribution.BlobDescriptorService - tracker MetricsTracker } var ( @@ -53,47 +28,36 @@ func NewCachedBlobStatter(cache distribution.BlobDescriptorService, backend dist } } -// NewCachedBlobStatterWithMetrics creates a new statter which prefers a cache and -// falls back to a backend. Hits and misses will send to the tracker. -func NewCachedBlobStatterWithMetrics(cache distribution.BlobDescriptorService, backend distribution.BlobDescriptorService, tracker MetricsTracker) distribution.BlobStatter { - return &cachedBlobStatter{ - cache: cache, - backend: backend, - tracker: tracker, - } -} - func (cbds *cachedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { cacheCount.WithValues("Request").Inc(1) - desc, err := cbds.cache.Stat(ctx, dgst) - if err != nil { - if err != distribution.ErrBlobUnknown { - logErrorf(ctx, cbds.tracker, "error retrieving descriptor from cache: %v", err) - } - goto fallback + // try getting from cache + desc, cacheErr := cbds.cache.Stat(ctx, dgst) + if cacheErr == nil { + cacheCount.WithValues("Hit").Inc(1) + return desc, nil } - cacheCount.WithValues("Hit").Inc(1) - if cbds.tracker != nil { - cbds.tracker.Hit() - } - return desc, nil -fallback: - cacheCount.WithValues("Miss").Inc(1) - if cbds.tracker != nil { - cbds.tracker.Miss() - } - desc, err = cbds.backend.Stat(ctx, dgst) + + // couldn't get from cache; get from backend + desc, err := cbds.backend.Stat(ctx, dgst) if err != nil { return desc, err } - if err := cbds.cache.SetDescriptor(ctx, dgst, desc); err != nil { - logErrorf(ctx, cbds.tracker, "error adding descriptor %v to cache: %v", desc.Digest, err) + if cacheErr == distribution.ErrBlobUnknown { + // cache doesn't have info. update it with info got from backend + cacheCount.WithValues("Miss").Inc(1) + if err := cbds.cache.SetDescriptor(ctx, dgst, desc); err != nil { + dcontext.GetLoggerWithField(ctx, "blob", dgst).WithError(err).Error("error from cache setting desc") + } + // we don't need to return cache error upstream if any. continue returning value from backend + } else { + // unknown error from cache. just log and error. do not store cache as it may be trigger many set calls + dcontext.GetLoggerWithField(ctx, "blob", dgst).WithError(cacheErr).Error("error from cache stat(ing) blob") + cacheCount.WithValues("Error").Inc(1) } - return desc, err - + return desc, nil } func (cbds *cachedBlobStatter) Clear(ctx context.Context, dgst digest.Digest) error { @@ -111,19 +75,7 @@ func (cbds *cachedBlobStatter) Clear(ctx context.Context, dgst digest.Digest) er func (cbds *cachedBlobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { if err := cbds.cache.SetDescriptor(ctx, dgst, desc); err != nil { - logErrorf(ctx, cbds.tracker, "error adding descriptor %v to cache: %v", desc.Digest, err) + dcontext.GetLoggerWithField(ctx, "blob", dgst).WithError(err).Error("error from cache setting desc") } return nil } - -func logErrorf(ctx context.Context, tracker MetricsTracker, format string, args ...interface{}) { - if tracker == nil { - return - } - - logger := tracker.Logger(ctx) - if logger == nil { - return - } - logger.Errorf(format, args...) 
-} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/distribution/tags.go b/src/cmd/linuxkit/vendor/github.com/docker/distribution/tags.go index f22df2b85..6033575ce 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/distribution/tags.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/distribution/tags.go @@ -2,6 +2,8 @@ package distribution import ( "context" + + "github.com/opencontainers/go-digest" ) // TagService provides access to information about tagged objects. @@ -25,3 +27,11 @@ type TagService interface { // Lookup returns the set of tags referencing the given digest. Lookup(ctx context.Context, digest Descriptor) ([]string, error) } + +// TagManifestsProvider provides method to retrieve the digests of manifests that a tag historically +// pointed to +type TagManifestsProvider interface { + // ManifestDigests returns set of digests that this tag historically pointed to. This also + // includes currently linked digest. There is no ordering guaranteed + ManifestDigests(ctx context.Context, tag string) ([]digest.Digest, error) +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/distribution/vendor.conf b/src/cmd/linuxkit/vendor/github.com/docker/distribution/vendor.conf deleted file mode 100644 index d0ebadf8b..000000000 --- a/src/cmd/linuxkit/vendor/github.com/docker/distribution/vendor.conf +++ /dev/null @@ -1,49 +0,0 @@ -github.com/Azure/azure-sdk-for-go 088007b3b08cc02b27f2eadfdcd870958460ce7e -github.com/Azure/go-autorest ec5f4903f77ed9927ac95b19ab8e44ada64c1356 -github.com/sirupsen/logrus 3d4380f53a34dcdc95f0c1db702615992b38d9a4 -github.com/aws/aws-sdk-go 5bcc0a238d880469f949fc7cd24e35f32ab80cbd -github.com/bshuster-repo/logrus-logstash-hook d2c0ecc1836d91814e15e23bb5dc309c3ef51f4a -github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9 -github.com/bugsnag/bugsnag-go b1d153021fcd90ca3f080db36bec96dc690fb274 -github.com/bugsnag/osext 0dd3f918b21bec95ace9dc86c7e70266cfc5c702 -github.com/bugsnag/panicwrap e2c28503fcd0675329da73bf48b33404db873782 -github.com/denverdino/aliyungo afedced274aa9a7fcdd47ac97018f0f8db4e5de2 -github.com/dgrijalva/jwt-go a601269ab70c205d26370c16f7c81e9017c14e04 -github.com/docker/go-metrics 399ea8c73916000c64c2c76e8da00ca82f8387ab -github.com/docker/goamz f0a21f5b2e12f83a505ecf79b633bb2035cf6f85 -github.com/docker/libtrust fa567046d9b14f6aa788882a950d69651d230b21 -github.com/garyburd/redigo 535138d7bcd717d6531c701ef5933d98b1866257 -github.com/go-ini/ini 2ba15ac2dc9cdf88c110ec2dc0ced7fa45f5678c -github.com/golang/protobuf 8d92cf5fc15a4382f8964b08e1f42a75c0591aa3 -github.com/gorilla/handlers 60c7bfde3e33c201519a200a4507a158cc03a17b -github.com/gorilla/mux 599cba5e7b6137d46ddf58fb1765f5d928e69604 -github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75 -github.com/jmespath/go-jmespath bd40a432e4c76585ef6b72d3fd96fb9b6dc7b68d -github.com/matttproud/golang_protobuf_extensions c12348ce28de40eed0136aa2b644d0ee0650e56c -github.com/miekg/dns 271c58e0c14f552178ea321a545ff9af38930f39 -github.com/mitchellh/mapstructure 482a9fd5fa83e8c4e7817413b80f3eb8feec03ef -github.com/ncw/swift b964f2ca856aac39885e258ad25aec08d5f64ee6 -github.com/prometheus/client_golang c332b6f63c0658a65eca15c0e5247ded801cf564 -github.com/prometheus/client_model 99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c -github.com/prometheus/common 89604d197083d4781071d3c65855d24ecfb0a563 -github.com/prometheus/procfs cb4147076ac75738c9a7d279075a253c0cc5acbd -github.com/spf13/cobra 312092086bed4968099259622145a0c9ae280064 -github.com/spf13/pflag 
5644820622454e71517561946e3d94b9f9db6842 -github.com/stevvooe/resumable 2aaf90b2ceea5072cb503ef2a620b08ff3119870 -github.com/xenolf/lego a9d8cec0e6563575e5868a005359ac97911b5985 -github.com/yvasiyarov/go-metrics 57bccd1ccd43f94bb17fdd8bf3007059b802f85e -github.com/yvasiyarov/gorelic a9bba5b9ab508a086f9a12b8c51fab68478e2128 -github.com/yvasiyarov/newrelic_platform_go b21fdbd4370f3717f3bbd2bf41c223bc273068e6 -golang.org/x/crypto c10c31b5e94b6f7a0283272dc2bb27163dcea24b -golang.org/x/net 4876518f9e71663000c348837735820161a42df7 -golang.org/x/oauth2 045497edb6234273d67dbc25da3f2ddbc4c4cacf -golang.org/x/time a4bde12657593d5e90d0533a3e4fd95e635124cb -google.golang.org/api 9bf6e6e569ff057f75d9604a46c52928f17d2b54 -google.golang.org/appengine 12d5545dc1cfa6047a286d5e853841b6471f4c19 -google.golang.org/cloud 975617b05ea8a58727e6c1a06b6161ff4185a9f2 -google.golang.org/grpc d3ddb4469d5a1b949fc7a7da7c1d6a0d1b6de994 -gopkg.in/check.v1 64131543e7896d5bcc6bd5a76287eb75ea96c673 -gopkg.in/square/go-jose.v1 40d457b439244b546f023d056628e5184136899b -gopkg.in/yaml.v2 bef53efd0c76e49e6de55ead051f886bea7e9420 -rsc.io/letsencrypt e770c10b0f1a64775ae91d240407ce00d1a5bdeb https://github.com/dmcgowan/letsencrypt.git -github.com/opencontainers/go-digest a6d0ee40d4207ea02364bd3b9e8e77b9159ba1eb diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/LICENSE b/src/cmd/linuxkit/vendor/github.com/docker/docker/LICENSE index 9c8e20ab8..6d8d58fb6 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/LICENSE +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/LICENSE @@ -176,7 +176,7 @@ END OF TERMS AND CONDITIONS - Copyright 2013-2017 Docker, Inc. + Copyright 2013-2018 Docker, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/NOTICE b/src/cmd/linuxkit/vendor/github.com/docker/docker/NOTICE index 0c74e15b0..58b19b6d1 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/NOTICE +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/NOTICE @@ -3,7 +3,7 @@ Copyright 2012-2017 Docker, Inc. This product includes software developed at Docker, Inc. (https://www.docker.com). -This product contains software (https://github.com/kr/pty) developed +This product contains software (https://github.com/creack/pty) developed by Keith Rarick, licensed under the MIT License. The following is courtesy of our legal counsel: diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/api/common.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/common.go index 255a81aed..aa146cdae 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/api/common.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/common.go @@ -3,7 +3,7 @@ package api // import "github.com/docker/docker/api" // Common constants for daemon and client. const ( // DefaultVersion of Current REST API - DefaultVersion = "1.38" + DefaultVersion = "1.40" // NoBaseImageSpecifier is the symbol used by the FROM // command to specify that no base image is to be used. diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/client.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/client.go index 3b698c2c2..4b9f50282 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/client.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/client.go @@ -187,6 +187,15 @@ type ImageBuildOptions struct { // build request. 
The same identifier can be used to gracefully cancel the // build with the cancel request. BuildID string + // Outputs defines configurations for exporting build results. Only supported + // in BuildKit mode + Outputs []ImageBuildOutput +} + +// ImageBuildOutput defines configuration for exporting a build result +type ImageBuildOutput struct { + Type string + Attrs map[string]string } // BuilderVersion sets the version of underlying builder to use diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/container/config.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/container/config.go index 89ad08c23..f767195b9 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/container/config.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/container/config.go @@ -54,7 +54,7 @@ type Config struct { Env []string // List of environment variable to set in the container Cmd strslice.StrSlice // Command to run when starting the container Healthcheck *HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy - ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (Windows specific) + ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (meaning treat as a command line) (Windows specific). Image string // Name of the image as it was passed by the operator (e.g. could be symbolic) Volumes map[string]struct{} // List of volumes (mounts) used for the container WorkingDir string // Current directory (PWD) in the command will be launched diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/container/container_changes.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/container/container_changes.go index c909d6ca3..222d14100 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/container/container_changes.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/container/container_changes.go @@ -1,4 +1,4 @@ -package container +package container // import "github.com/docker/docker/api/types/container" // ---------------------------------------------------------------------------- // DO NOT EDIT THIS FILE diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/container/container_create.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/container/container_create.go index 49efa0f2c..1ec9c3728 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/container/container_create.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/container/container_create.go @@ -1,4 +1,4 @@ -package container +package container // import "github.com/docker/docker/api/types/container" // ---------------------------------------------------------------------------- // DO NOT EDIT THIS FILE diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/container/container_top.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/container/container_top.go index ba41edcf3..f8a606687 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/container/container_top.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/container/container_top.go @@ -1,4 +1,4 @@ -package container +package container // import "github.com/docker/docker/api/types/container" // ---------------------------------------------------------------------------- // DO NOT EDIT THIS FILE diff --git 
a/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/container/container_update.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/container/container_update.go index 7630ae54c..33addedf7 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/container/container_update.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/container/container_update.go @@ -1,4 +1,4 @@ -package container +package container // import "github.com/docker/docker/api/types/container" // ---------------------------------------------------------------------------- // DO NOT EDIT THIS FILE diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/container/container_wait.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/container/container_wait.go index 9e3910a6b..94b6a20e1 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/container/container_wait.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/container/container_wait.go @@ -1,4 +1,4 @@ -package container +package container // import "github.com/docker/docker/api/types/container" // ---------------------------------------------------------------------------- // DO NOT EDIT THIS FILE diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/container/host_config.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/container/host_config.go index 4ef26fa6c..c3de3d976 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/container/host_config.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/container/host_config.go @@ -244,6 +244,16 @@ func (n PidMode) Container() string { return "" } +// DeviceRequest represents a request for devices from a device driver. +// Used by GPU device drivers. +type DeviceRequest struct { + Driver string // Name of device driver + Count int // Number of devices to request (-1 = All) + DeviceIDs []string // List of device IDs as recognizable by the device driver + Capabilities [][]string // An OR list of AND lists of device capabilities (e.g. "gpu") + Options map[string]string // Options to pass onto the device driver +} + // DeviceMapping represents the device mapping between the host and the container. type DeviceMapping struct { PathOnHost string @@ -327,13 +337,14 @@ type Resources struct { CpusetMems string // CpusetMems 0-2, 0,1 Devices []DeviceMapping // List of devices to map inside the container DeviceCgroupRules []string // List of rule to be added to the device cgroup - DiskQuota int64 // Disk limit (in bytes) + DeviceRequests []DeviceRequest // List of device requests for device drivers KernelMemory int64 // Kernel memory limit (in bytes) + KernelMemoryTCP int64 // Hard limit for kernel TCP buffer memory (in bytes) MemoryReservation int64 // Memory soft limit (in bytes) MemorySwap int64 // Total memory usage (memory + swap); set `-1` to enable unlimited swap MemorySwappiness *int64 // Tuning container memory swappiness behaviour OomKillDisable *bool // Whether to disable OOM Killer or not - PidsLimit int64 // Setting pids limit for a container + PidsLimit *int64 // Setting PIDs limit for a container; Set `0` or `-1` for unlimited, or `null` to not change. 
Ulimits []*units.Ulimit // List of ulimits to be set in the container // Applicable to Windows @@ -369,9 +380,10 @@ type HostConfig struct { // Applicable to UNIX platforms CapAdd strslice.StrSlice // List of kernel capabilities to add to the container CapDrop strslice.StrSlice // List of kernel capabilities to remove from the container - DNS []string `json:"Dns"` // List of DNS server to lookup - DNSOptions []string `json:"DnsOptions"` // List of DNSOption to look for - DNSSearch []string `json:"DnsSearch"` // List of DNSSearch to look for + Capabilities []string `json:"Capabilities"` // List of kernel capabilities to be available for container (this overrides the default set) + DNS []string `json:"Dns"` // List of DNS server to lookup + DNSOptions []string `json:"DnsOptions"` // List of DNSOption to look for + DNSSearch []string `json:"DnsSearch"` // List of DNSSearch to look for ExtraHosts []string // List of extra hosts GroupAdd []string // List of additional groups that the container process will run as IpcMode IpcMode // IPC namespace to use for the container diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/filters/parse.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/filters/parse.go index a41e3d8d9..0bd2e1e18 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/filters/parse.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/filters/parse.go @@ -5,7 +5,6 @@ package filters // import "github.com/docker/docker/api/types/filters" import ( "encoding/json" - "errors" "regexp" "strings" @@ -37,39 +36,13 @@ func NewArgs(initialArgs ...KeyValuePair) Args { return args } -// ParseFlag parses a key=value string and adds it to an Args. -// -// Deprecated: Use Args.Add() -func ParseFlag(arg string, prev Args) (Args, error) { - filters := prev - if len(arg) == 0 { - return filters, nil +// Keys returns all the keys in list of Args +func (args Args) Keys() []string { + keys := make([]string, 0, len(args.fields)) + for k := range args.fields { + keys = append(keys, k) } - - if !strings.Contains(arg, "=") { - return filters, ErrBadFormat - } - - f := strings.SplitN(arg, "=", 2) - - name := strings.ToLower(strings.TrimSpace(f[0])) - value := strings.TrimSpace(f[1]) - - filters.Add(name, value) - - return filters, nil -} - -// ErrBadFormat is an error returned when a filter is not in the form key=value -// -// Deprecated: this error will be removed in a future version -var ErrBadFormat = errors.New("bad format of filter (expected name=value)") - -// ToParam encodes the Args as args JSON encoded string -// -// Deprecated: use ToJSON -func ToParam(a Args) (string, error) { - return ToJSON(a) + return keys } // MarshalJSON returns a JSON byte representation of the Args @@ -107,13 +80,6 @@ func ToParamWithVersion(version string, a Args) (string, error) { return ToJSON(a) } -// FromParam decodes a JSON encoded string into Args -// -// Deprecated: use FromJSON -func FromParam(p string) (Args, error) { - return FromJSON(p) -} - // FromJSON decodes a JSON encoded string into Args func FromJSON(p string) (Args, error) { args := NewArgs() @@ -275,14 +241,6 @@ func (args Args) FuzzyMatch(key, source string) bool { return false } -// Include returns true if the key exists in the mapping -// -// Deprecated: use Contains -func (args Args) Include(field string) bool { - _, ok := args.fields[field] - return ok -} - // Contains returns true if the key exists in the mapping func (args Args) Contains(field string) bool { _, ok := 
args.fields[field] @@ -323,6 +281,22 @@ func (args Args) WalkValues(field string, op func(value string) error) error { return nil } +// Clone returns a copy of args. +func (args Args) Clone() (newArgs Args) { + newArgs.fields = make(map[string]map[string]bool, len(args.fields)) + for k, m := range args.fields { + var mm map[string]bool + if m != nil { + mm = make(map[string]bool, len(m)) + for kk, v := range m { + mm[kk] = v + } + } + newArgs.fields[k] = mm + } + return newArgs +} + func deprecatedArgs(d map[string][]string) map[string]map[string]bool { m := map[string]map[string]bool{} for k, v := range d { diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/image/image_history.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/image/image_history.go index d6b354bcd..b5a7a0c49 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/image/image_history.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/image/image_history.go @@ -1,4 +1,4 @@ -package image +package image // import "github.com/docker/docker/api/types/image" // ---------------------------------------------------------------------------- // DO NOT EDIT THIS FILE diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/mount/mount.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/mount/mount.go index 3fef974df..ab4446b38 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/mount/mount.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/mount/mount.go @@ -79,7 +79,8 @@ const ( // BindOptions defines options specific to mounts of type "bind". type BindOptions struct { - Propagation Propagation `json:",omitempty"` + Propagation Propagation `json:",omitempty"` + NonRecursive bool `json:",omitempty"` } // VolumeOptions represents the options for a mount of type volume. diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/network/network.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/network/network.go index ccb448f23..71e97338f 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/network/network.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/network/network.go @@ -112,12 +112,13 @@ type ConfigReference struct { } var acceptedFilters = map[string]bool{ - "driver": true, - "type": true, - "name": true, - "id": true, - "label": true, - "scope": true, + "dangling": true, + "driver": true, + "id": true, + "label": true, + "name": true, + "scope": true, + "type": true, } // ValidateFilters validates the list of filter args with the available filters. 
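The DeviceRequest type added to HostConfig.Resources above is the plumbing behind `docker run --gpus`. A minimal sketch of how a caller populates it, using only the vendored container package from this patch (the "nvidia" driver name is illustrative, not mandated by the type):

    package main

    import (
        "fmt"

        "github.com/docker/docker/api/types/container"
    )

    func main() {
        // Ask the device driver for every device it can offer; Count: -1
        // means "all", and Capabilities is an OR-list of AND-lists.
        hostConfig := container.HostConfig{
            Resources: container.Resources{
                DeviceRequests: []container.DeviceRequest{{
                    Driver:       "nvidia", // illustrative driver name
                    Count:        -1,
                    Capabilities: [][]string{{"gpu"}},
                }},
            },
        }
        fmt.Printf("%+v\n", hostConfig.Resources.DeviceRequests[0])
    }

Note also that PidsLimit becomes *int64 in the same hunk, so callers can now distinguish "leave unchanged" (nil) from "unlimited" (0 or -1).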
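The filters changes above remove the long-deprecated ParseFlag, ToParam, FromParam and Include helpers and add Keys and Clone. A short sketch of the surviving surface, again using only the vendored package:

    package main

    import (
        "fmt"

        "github.com/docker/docker/api/types/filters"
    )

    func main() {
        // NewArgs with KeyValuePair replaces the removed ParseFlag helper.
        args := filters.NewArgs(
            filters.KeyValuePair{Key: "dangling", Value: "true"},
            filters.KeyValuePair{Key: "label", Value: "env=prod"},
        )

        fmt.Println(args.Contains("dangling")) // Contains replaces Include
        fmt.Println(args.Keys())               // all keys that have been set

        // Clone returns an independent deep copy.
        copied := args.Clone()
        copied.Add("name", "web")
        fmt.Println(len(args.Keys()), len(copied.Keys())) // 2 3

        // ToJSON replaces the removed ToParam for wire encoding.
        encoded, err := filters.ToJSON(args)
        if err != nil {
            panic(err)
        }
        fmt.Println(encoded)
    }

The "dangling" key is relevant here because the network.go hunk above adds it to the accepted filters for network listing.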
diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/seccomp.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/seccomp.go index 67a41e1a8..2259c6be1 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/seccomp.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/seccomp.go @@ -77,8 +77,9 @@ type Arg struct { // Filter is used to conditionally apply Seccomp rules type Filter struct { - Caps []string `json:"caps,omitempty"` - Arches []string `json:"arches,omitempty"` + Caps []string `json:"caps,omitempty"` + Arches []string `json:"arches,omitempty"` + MinKernel string `json:"minKernel,omitempty"` } // Syscall is used to match a group of syscalls in Seccomp diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/stats.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/stats.go index 60175c061..20daebed1 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/stats.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/stats.go @@ -120,7 +120,7 @@ type NetworkStats struct { RxBytes uint64 `json:"rx_bytes"` // Packets received. Windows and Linux. RxPackets uint64 `json:"rx_packets"` - // Received errors. Not used on Windows. Note that we dont `omitempty` this + // Received errors. Not used on Windows. Note that we don't `omitempty` this // field as it is expected in the >=v1.21 API stats structure. RxErrors uint64 `json:"rx_errors"` // Incoming packets dropped. Windows and Linux. @@ -129,7 +129,7 @@ type NetworkStats struct { TxBytes uint64 `json:"tx_bytes"` // Packets sent. Windows and Linux. TxPackets uint64 `json:"tx_packets"` - // Sent errors. Not used on Windows. Note that we dont `omitempty` this + // Sent errors. Not used on Windows. Note that we don't `omitempty` this // field as it is expected in the >=v1.21 API stats structure. TxErrors uint64 `json:"tx_errors"` // Outgoing packets dropped. Windows and Linux. diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/swarm/config.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/swarm/config.go index a1555cf43..16202ccce 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/swarm/config.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/swarm/config.go @@ -27,9 +27,14 @@ type ConfigReferenceFileTarget struct { Mode os.FileMode } +// ConfigReferenceRuntimeTarget is a target for a config specifying that it +// isn't mounted into the container but instead has some other purpose. 
+type ConfigReferenceRuntimeTarget struct{} + // ConfigReference is a reference to a config in swarm type ConfigReference struct { - File *ConfigReferenceFileTarget + File *ConfigReferenceFileTarget `json:",omitempty"` + Runtime *ConfigReferenceRuntimeTarget `json:",omitempty"` ConfigID string ConfigName string } diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/swarm/container.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/swarm/container.go index 151211ff5..48190c176 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/swarm/container.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/swarm/container.go @@ -33,6 +33,7 @@ type SELinuxContext struct { // CredentialSpec for managed service account (Windows only) type CredentialSpec struct { + Config string File string Registry string } @@ -71,4 +72,5 @@ type ContainerSpec struct { Secrets []*SecretReference `json:",omitempty"` Configs []*ConfigReference `json:",omitempty"` Isolation container.Isolation `json:",omitempty"` + Sysctls map[string]string `json:",omitempty"` } diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/swarm/swarm.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/swarm/swarm.go index 1b111d725..b25f99964 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/swarm/swarm.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/swarm/swarm.go @@ -1,6 +1,8 @@ package swarm // import "github.com/docker/docker/api/types/swarm" -import "time" +import ( + "time" +) // ClusterInfo represents info about the cluster for outputting in "info" // it contains the same information as "Swarm", but without the JoinTokens @@ -10,6 +12,9 @@ type ClusterInfo struct { Spec Spec TLSInfo TLSInfo RootRotationInProgress bool + DefaultAddrPool []string + SubnetSize uint32 + DataPathPort uint32 } // Swarm represents a swarm. @@ -149,10 +154,13 @@ type InitRequest struct { ListenAddr string AdvertiseAddr string DataPathAddr string + DataPathPort uint32 ForceNewCluster bool Spec Spec AutoLockManagers bool Availability NodeAvailability + DefaultAddrPool []string + SubnetSize uint32 } // JoinRequest is the request used to join a swarm. @@ -201,6 +209,8 @@ type Info struct { Managers int `json:",omitempty"` Cluster *ClusterInfo `json:",omitempty"` + + Warnings []string `json:",omitempty"` } // Peer represents a peer. diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/swarm/task.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/swarm/task.go index b35605d12..d5a57df5d 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/swarm/task.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/swarm/task.go @@ -127,6 +127,7 @@ type ResourceRequirements struct { type Placement struct { Constraints []string `json:",omitempty"` Preferences []PlacementPreference `json:",omitempty"` + MaxReplicas uint64 `json:",omitempty"` // Platforms stores all the platforms that the image can run on. // This field is used in the platform filter for scheduling. 
If empty, diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/types.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/types.go index 06c0ca3a6..a39ffcb7b 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/types.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/types.go @@ -102,9 +102,10 @@ type ContainerStats struct { // Ping contains response of Engine API: // GET "/_ping" type Ping struct { - APIVersion string - OSType string - Experimental bool + APIVersion string + OSType string + Experimental bool + BuilderVersion BuilderVersion } // ComponentVersion describes the version information for a specific component. @@ -157,10 +158,12 @@ type Info struct { MemoryLimit bool SwapLimit bool KernelMemory bool + KernelMemoryTCP bool CPUCfsPeriod bool `json:"CpuCfsPeriod"` CPUCfsQuota bool `json:"CpuCfsQuota"` CPUShares bool CPUSet bool + PidsLimit bool IPv4Forwarding bool BridgeNfIptables bool BridgeNfIP6tables bool `json:"BridgeNfIp6tables"` @@ -204,6 +207,8 @@ type Info struct { RuncCommit Commit InitCommit Commit SecurityOptions []string + ProductLicense string `json:",omitempty"` + Warnings []string } // KeyValue holds a key/value pair @@ -540,6 +545,7 @@ type ImagesPruneReport struct { // BuildCachePruneReport contains the response for Engine API: // POST "/build/prune" type BuildCachePruneReport struct { + CachesDeleted []string SpaceReclaimed uint64 } @@ -589,14 +595,21 @@ type BuildResult struct { // BuildCache contains information about a build cache record type BuildCache struct { - ID string - Mutable bool - InUse bool - Size int64 - + ID string + Parent string + Type string + Description string + InUse bool + Shared bool + Size int64 CreatedAt time.Time LastUsedAt *time.Time UsageCount int - Parent string - Description string +} + +// BuildCachePruneOptions hold parameters to prune the build cache +type BuildCachePruneOptions struct { + All bool + KeepStorage int64 + Filters filters.Args } diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/volume/volume_create.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/volume/volume_create.go index 539e9b97d..0c3772d3a 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/volume/volume_create.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/volume/volume_create.go @@ -1,4 +1,4 @@ -package volume +package volume // import "github.com/docker/docker/api/types/volume" // ---------------------------------------------------------------------------- // DO NOT EDIT THIS FILE @@ -7,7 +7,7 @@ package volume // See hack/generate-swagger-api.sh // ---------------------------------------------------------------------------- -// VolumeCreateBody +// VolumeCreateBody Volume configuration // swagger:model VolumeCreateBody type VolumeCreateBody struct { diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/volume/volume_list.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/volume/volume_list.go index 1bb279dbb..45c3c1c9a 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/volume/volume_list.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/volume/volume_list.go @@ -1,4 +1,4 @@ -package volume +package volume // import "github.com/docker/docker/api/types/volume" // ---------------------------------------------------------------------------- // DO NOT EDIT THIS FILE @@ -9,7 +9,7 @@ package volume import "github.com/docker/docker/api/types" -// 
VolumeListOKBody +// VolumeListOKBody Volume list response // swagger:model VolumeListOKBody type VolumeListOKBody struct { diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/README.md b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/README.md index 059dfb3ce..992f18117 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/README.md +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/README.md @@ -16,7 +16,7 @@ import ( ) func main() { - cli, err := client.NewEnvClient() + cli, err := client.NewClientWithOpts(client.FromEnv) if err != nil { panic(err) } diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/build_cancel.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/build_cancel.go index 4cf8c980a..3aae43e3d 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/build_cancel.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/build_cancel.go @@ -1,9 +1,8 @@ package client // import "github.com/docker/docker/client" import ( + "context" "net/url" - - "golang.org/x/net/context" ) // BuildCancel requests the daemon to cancel ongoing build request @@ -12,10 +11,6 @@ func (cli *Client) BuildCancel(ctx context.Context, id string) error { query.Set("id", id) serverResp, err := cli.post(ctx, "/build/cancel", query, nil, nil) - if err != nil { - return err - } - defer ensureReaderClosed(serverResp) - - return nil + ensureReaderClosed(serverResp) + return err } diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/build_prune.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/build_prune.go index c4772a04e..397d67cdc 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/build_prune.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/build_prune.go @@ -4,23 +4,38 @@ import ( "context" "encoding/json" "fmt" + "net/url" "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/pkg/errors" ) // BuildCachePrune requests the daemon to delete unused cache data -func (cli *Client) BuildCachePrune(ctx context.Context) (*types.BuildCachePruneReport, error) { +func (cli *Client) BuildCachePrune(ctx context.Context, opts types.BuildCachePruneOptions) (*types.BuildCachePruneReport, error) { if err := cli.NewVersionError("1.31", "build prune"); err != nil { return nil, err } report := types.BuildCachePruneReport{} - serverResp, err := cli.post(ctx, "/build/prune", nil, nil, nil) + query := url.Values{} + if opts.All { + query.Set("all", "1") + } + query.Set("keep-storage", fmt.Sprintf("%d", opts.KeepStorage)) + filters, err := filters.ToJSON(opts.Filters) + if err != nil { + return nil, errors.Wrap(err, "prune could not marshal filters option") + } + query.Set("filters", filters) + + serverResp, err := cli.post(ctx, "/build/prune", query, nil, nil) + defer ensureReaderClosed(serverResp) + if err != nil { return nil, err } - defer ensureReaderClosed(serverResp) if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { return nil, fmt.Errorf("Error retrieving disk usage: %v", err) diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/checkpoint_list.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/checkpoint_list.go index 2b73fb553..66d46dd16 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/checkpoint_list.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/checkpoint_list.go @@ -18,11 +18,11 @@ func (cli *Client) CheckpointList(ctx 
context.Context, container string, options } resp, err := cli.get(ctx, "/containers/"+container+"/checkpoints", query, nil) + defer ensureReaderClosed(resp) if err != nil { return checkpoints, wrapResponseError(err, resp, "container", container) } err = json.NewDecoder(resp.body).Decode(&checkpoints) - ensureReaderClosed(resp) return checkpoints, err } diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/client.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/client.go index b874b3b52..b63d4d6d4 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/client.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/client.go @@ -23,7 +23,7 @@ For example, to list running containers (the equivalent of "docker ps"): ) func main() { - cli, err := client.NewEnvClient() + cli, err := client.NewClientWithOpts(client.FromEnv) if err != nil { panic(err) } @@ -47,16 +47,13 @@ import ( "net" "net/http" "net/url" - "os" "path" - "path/filepath" "strings" "github.com/docker/docker/api" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/versions" "github.com/docker/go-connections/sockets" - "github.com/docker/go-connections/tlsconfig" "github.com/pkg/errors" ) @@ -84,13 +81,22 @@ type Client struct { customHTTPHeaders map[string]string // manualOverride is set to true when the version was set by users. manualOverride bool + + // negotiateVersion indicates if the client should automatically negotiate + // the API version to use when making requests. API version negotiation is + // performed on the first request, after which negotiated is set to "true" + // so that subsequent requests do not re-negotiate. + negotiateVersion bool + + // negotiated indicates that API version negotiation took place + negotiated bool } // CheckRedirect specifies the policy for dealing with redirect responses: // If the request is non-GET return `ErrRedirect`. Otherwise use the last response. // // Go 1.8 changes behavior for HTTP redirects (specifically 301, 307, and 308) in the client . -// The Docker client (and by extension docker API client) can be made to to send a request +// The Docker client (and by extension docker API client) can be made to send a request // like POST /containers//start where what would normally be in the name section of the URL is empty. // This triggers an HTTP 301 from the daemon. // In go 1.8 this 301 will be converted to a GET request, and ends up getting a 404 from the daemon. @@ -103,130 +109,6 @@ func CheckRedirect(req *http.Request, via []*http.Request) error { return ErrRedirect } -// NewEnvClient initializes a new API client based on environment variables. -// See FromEnv for a list of support environment variables. -// -// Deprecated: use NewClientWithOpts(FromEnv) -func NewEnvClient() (*Client, error) { - return NewClientWithOpts(FromEnv) -} - -// FromEnv configures the client with values from environment variables. -// -// Supported environment variables: -// DOCKER_HOST to set the url to the docker server. -// DOCKER_API_VERSION to set the version of the API to reach, leave empty for latest. -// DOCKER_CERT_PATH to load the TLS certificates from. -// DOCKER_TLS_VERIFY to enable or disable TLS verification, off by default. 
-func FromEnv(c *Client) error { - if dockerCertPath := os.Getenv("DOCKER_CERT_PATH"); dockerCertPath != "" { - options := tlsconfig.Options{ - CAFile: filepath.Join(dockerCertPath, "ca.pem"), - CertFile: filepath.Join(dockerCertPath, "cert.pem"), - KeyFile: filepath.Join(dockerCertPath, "key.pem"), - InsecureSkipVerify: os.Getenv("DOCKER_TLS_VERIFY") == "", - } - tlsc, err := tlsconfig.Client(options) - if err != nil { - return err - } - - c.client = &http.Client{ - Transport: &http.Transport{TLSClientConfig: tlsc}, - CheckRedirect: CheckRedirect, - } - } - - if host := os.Getenv("DOCKER_HOST"); host != "" { - if err := WithHost(host)(c); err != nil { - return err - } - } - - if version := os.Getenv("DOCKER_API_VERSION"); version != "" { - c.version = version - c.manualOverride = true - } - return nil -} - -// WithTLSClientConfig applies a tls config to the client transport. -func WithTLSClientConfig(cacertPath, certPath, keyPath string) func(*Client) error { - return func(c *Client) error { - opts := tlsconfig.Options{ - CAFile: cacertPath, - CertFile: certPath, - KeyFile: keyPath, - ExclusiveRootPools: true, - } - config, err := tlsconfig.Client(opts) - if err != nil { - return errors.Wrap(err, "failed to create tls config") - } - if transport, ok := c.client.Transport.(*http.Transport); ok { - transport.TLSClientConfig = config - return nil - } - return errors.Errorf("cannot apply tls config to transport: %T", c.client.Transport) - } -} - -// WithDialer applies the dialer.DialContext to the client transport. This can be -// used to set the Timeout and KeepAlive settings of the client. -func WithDialer(dialer *net.Dialer) func(*Client) error { - return func(c *Client) error { - if transport, ok := c.client.Transport.(*http.Transport); ok { - transport.DialContext = dialer.DialContext - return nil - } - return errors.Errorf("cannot apply dialer to transport: %T", c.client.Transport) - } -} - -// WithVersion overrides the client version with the specified one -func WithVersion(version string) func(*Client) error { - return func(c *Client) error { - c.version = version - return nil - } -} - -// WithHost overrides the client host with the specified one. -func WithHost(host string) func(*Client) error { - return func(c *Client) error { - hostURL, err := ParseHostURL(host) - if err != nil { - return err - } - c.host = host - c.proto = hostURL.Scheme - c.addr = hostURL.Host - c.basePath = hostURL.Path - if transport, ok := c.client.Transport.(*http.Transport); ok { - return sockets.ConfigureTransport(transport, c.proto, c.addr) - } - return errors.Errorf("cannot apply host to transport: %T", c.client.Transport) - } -} - -// WithHTTPClient overrides the client http client with the specified one -func WithHTTPClient(client *http.Client) func(*Client) error { - return func(c *Client) error { - if client != nil { - c.client = client - } - return nil - } -} - -// WithHTTPHeaders overrides the client default http headers -func WithHTTPHeaders(headers map[string]string) func(*Client) error { - return func(c *Client) error { - c.customHTTPHeaders = headers - return nil - } -} - // NewClientWithOpts initializes a new API client with default values. It takes functors // to modify values when creating it, like `NewClientWithOpts(WithVersion(…))` // It also initializes the custom http headers to add to each request. @@ -234,7 +116,7 @@ func WithHTTPHeaders(headers map[string]string) func(*Client) error { // It won't send any version information if the version number is empty. 
It is // highly recommended that you set a version or your client may break if the // server is upgraded. -func NewClientWithOpts(ops ...func(*Client) error) (*Client, error) { +func NewClientWithOpts(ops ...Opt) (*Client, error) { client, err := defaultHTTPClient(DefaultDockerHost) if err != nil { return nil, err @@ -242,7 +124,6 @@ func NewClientWithOpts(ops ...func(*Client) error) (*Client, error) { c := &Client{ host: DefaultDockerHost, version: api.DefaultVersion, - scheme: "http", client: client, proto: defaultProto, addr: defaultAddr, @@ -257,14 +138,18 @@ func NewClientWithOpts(ops ...func(*Client) error) (*Client, error) { if _, ok := c.client.Transport.(http.RoundTripper); !ok { return nil, fmt.Errorf("unable to verify TLS configuration, invalid transport %v", c.client.Transport) } - tlsConfig := resolveTLSConfig(c.client.Transport) - if tlsConfig != nil { - // TODO(stevvooe): This isn't really the right way to write clients in Go. - // `NewClient` should probably only take an `*http.Client` and work from there. - // Unfortunately, the model of having a host-ish/url-thingy as the connection - // string has us confusing protocol and transport layers. We continue doing - // this to avoid breaking existing clients but this should be addressed. - c.scheme = "https" + if c.scheme == "" { + c.scheme = "http" + + tlsConfig := resolveTLSConfig(c.client.Transport) + if tlsConfig != nil { + // TODO(stevvooe): This isn't really the right way to write clients in Go. + // `NewClient` should probably only take an `*http.Client` and work from there. + // Unfortunately, the model of having a host-ish/url-thingy as the connection + // string has us confusing protocol and transport layers. We continue doing + // this to avoid breaking existing clients but this should be addressed. + c.scheme = "https" + } } return c, nil @@ -283,18 +168,6 @@ func defaultHTTPClient(host string) (*http.Client, error) { }, nil } -// NewClient initializes a new API client for the given host and API version. -// It uses the given http client as transport. -// It also initializes the custom http headers to add to each request. -// -// It won't send any version information if the version number is empty. It is -// highly recommended that you set a version or your client may break if the -// server is upgraded. -// Deprecated: use NewClientWithOpts -func NewClient(host string, version string, client *http.Client, httpHeaders map[string]string) (*Client, error) { - return NewClientWithOpts(WithHost(host), WithVersion(version), WithHTTPClient(client), WithHTTPHeaders(httpHeaders)) -} - // Close the transport used by the client func (cli *Client) Close() error { if t, ok := cli.client.Transport.(*http.Transport); ok { @@ -305,8 +178,11 @@ func (cli *Client) Close() error { // getAPIPath returns the versioned request path to call the api. // It appends the query parameters to the path if they are not empty. -func (cli *Client) getAPIPath(p string, query url.Values) string { +func (cli *Client) getAPIPath(ctx context.Context, p string, query url.Values) string { var apiPath string + if cli.negotiateVersion && !cli.negotiated { + cli.NegotiateAPIVersion(ctx) + } if cli.version != "" { v := strings.TrimPrefix(cli.version, "v") apiPath = path.Join(cli.basePath, "/v"+v, p) @@ -322,19 +198,31 @@ func (cli *Client) ClientVersion() string { } // NegotiateAPIVersion queries the API and updates the version to match the -// API version. Any errors are silently ignored. +// API version. Any errors are silently ignored. 
If a manual override is in place, +// either through the `DOCKER_API_VERSION` environment variable, or if the client +// was initialized with a fixed version (`opts.WithVersion(xx)`), no negotiation +// will be performed. func (cli *Client) NegotiateAPIVersion(ctx context.Context) { - ping, _ := cli.Ping(ctx) - cli.NegotiateAPIVersionPing(ping) + if !cli.manualOverride { + ping, _ := cli.Ping(ctx) + cli.negotiateAPIVersionPing(ping) + } } // NegotiateAPIVersionPing updates the client version to match the Ping.APIVersion -// if the ping version is less than the default version. +// if the ping version is less than the default version. If a manual override is +// in place, either through the `DOCKER_API_VERSION` environment variable, or if +// the client was initialized with a fixed version (`opts.WithVersion(xx)`), no +// negotiation is performed. func (cli *Client) NegotiateAPIVersionPing(p types.Ping) { - if cli.manualOverride { - return + if !cli.manualOverride { + cli.negotiateAPIVersionPing(p) } +} +// negotiateAPIVersionPing queries the API and updates the version to match the +// API version. Any errors are silently ignored. +func (cli *Client) negotiateAPIVersionPing(p types.Ping) { // try the latest version before versioning headers existed if p.APIVersion == "" { p.APIVersion = "1.24" @@ -349,6 +237,12 @@ func (cli *Client) NegotiateAPIVersionPing(p types.Ping) { if versions.LessThan(p.APIVersion, cli.version) { cli.version = p.APIVersion } + + // Store the results, so that automatic API version negotiation (if enabled) + // won't be performed on the next request. + if cli.negotiateVersion { + cli.negotiated = true + } } // DaemonHost returns the host address used by the client @@ -400,3 +294,16 @@ func (cli *Client) CustomHTTPHeaders() map[string]string { func (cli *Client) SetCustomHTTPHeaders(headers map[string]string) { cli.customHTTPHeaders = headers } + +// Dialer returns a dialer for a raw stream connection, with HTTP/1.1 header, that can be used for proxying the daemon connection. +// Used by `docker dial-stdio` (docker/cli#889). +func (cli *Client) Dialer() func(context.Context) (net.Conn, error) { + return func(ctx context.Context) (net.Conn, error) { + if transport, ok := cli.client.Transport.(*http.Transport); ok { + if transport.DialContext != nil && transport.TLSClientConfig == nil { + return transport.DialContext(ctx, cli.proto, cli.addr) + } + } + return fallbackDial(cli.proto, cli.addr, resolveTLSConfig(cli.client.Transport)) + } +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/client_deprecated.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/client_deprecated.go new file mode 100644 index 000000000..54cdfc29a --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/client_deprecated.go @@ -0,0 +1,23 @@ +package client + +import "net/http" + +// NewClient initializes a new API client for the given host and API version. +// It uses the given http client as transport. +// It also initializes the custom http headers to add to each request. +// +// It won't send any version information if the version number is empty. It is +// highly recommended that you set a version or your client may break if the +// server is upgraded. 
+// Deprecated: use NewClientWithOpts +func NewClient(host string, version string, client *http.Client, httpHeaders map[string]string) (*Client, error) { + return NewClientWithOpts(WithHost(host), WithVersion(version), WithHTTPClient(client), WithHTTPHeaders(httpHeaders)) +} + +// NewEnvClient initializes a new API client based on environment variables. +// See FromEnv for a list of support environment variables. +// +// Deprecated: use NewClientWithOpts(FromEnv) +func NewEnvClient() (*Client, error) { + return NewClientWithOpts(FromEnv) +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/config_create.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/config_create.go index c8b802ad3..ee7d411df 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/config_create.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/config_create.go @@ -15,11 +15,11 @@ func (cli *Client) ConfigCreate(ctx context.Context, config swarm.ConfigSpec) (t return response, err } resp, err := cli.post(ctx, "/configs/create", nil, config, nil) + defer ensureReaderClosed(resp) if err != nil { return response, err } err = json.NewDecoder(resp.body).Decode(&response) - ensureReaderClosed(resp) return response, err } diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/config_inspect.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/config_inspect.go index 4ac566ad8..7d0ce3e11 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/config_inspect.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/config_inspect.go @@ -18,10 +18,10 @@ func (cli *Client) ConfigInspectWithRaw(ctx context.Context, id string) (swarm.C return swarm.Config{}, nil, err } resp, err := cli.get(ctx, "/configs/"+id, nil, nil) + defer ensureReaderClosed(resp) if err != nil { return swarm.Config{}, nil, wrapResponseError(err, resp, "config", id) } - defer ensureReaderClosed(resp) body, err := ioutil.ReadAll(resp.body) if err != nil { diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/config_list.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/config_list.go index 2b9d54606..565acc6e2 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/config_list.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/config_list.go @@ -27,12 +27,12 @@ func (cli *Client) ConfigList(ctx context.Context, options types.ConfigListOptio } resp, err := cli.get(ctx, "/configs", query, nil) + defer ensureReaderClosed(resp) if err != nil { return nil, err } var configs []swarm.Config err = json.NewDecoder(resp.body).Decode(&configs) - ensureReaderClosed(resp) return configs, err } diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/config_remove.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/config_remove.go index a96871e98..a708fcaec 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/config_remove.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/config_remove.go @@ -8,6 +8,6 @@ func (cli *Client) ConfigRemove(ctx context.Context, id string) error { return err } resp, err := cli.delete(ctx, "/configs/"+id, nil, nil) - ensureReaderClosed(resp) + defer ensureReaderClosed(resp) return wrapResponseError(err, resp, "config", id) } diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/container_commit.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/container_commit.go index 377a2ea68..2966e88c8 100644 --- 
a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/container_commit.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/container_commit.go @@ -45,11 +45,11 @@ func (cli *Client) ContainerCommit(ctx context.Context, container string, option var response types.IDResponse resp, err := cli.post(ctx, "/commit", query, options.Config, nil) + defer ensureReaderClosed(resp) if err != nil { return response, err } err = json.NewDecoder(resp.body).Decode(&response) - ensureReaderClosed(resp) return response, err } diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/container_copy.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/container_copy.go index d706260ce..bb278bf7f 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/container_copy.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/container_copy.go @@ -21,10 +21,10 @@ func (cli *Client) ContainerStatPath(ctx context.Context, containerID, path stri urlStr := "/containers/" + containerID + "/archive" response, err := cli.head(ctx, urlStr, query, nil) + defer ensureReaderClosed(response) if err != nil { return types.ContainerPathStat{}, wrapResponseError(err, response, "container:path", containerID+":"+path) } - defer ensureReaderClosed(response) return getContainerPathStatFromHeader(response.header) } @@ -45,11 +45,12 @@ func (cli *Client) CopyToContainer(ctx context.Context, containerID, dstPath str apiPath := "/containers/" + containerID + "/archive" response, err := cli.putRaw(ctx, apiPath, query, content, nil) + defer ensureReaderClosed(response) if err != nil { return wrapResponseError(err, response, "container:path", containerID+":"+dstPath) } - defer ensureReaderClosed(response) + // TODO this code converts non-error status-codes (e.g., "204 No Content") into an error; verify if this is the desired behavior if response.statusCode != http.StatusOK { return fmt.Errorf("unexpected status code from daemon: %d", response.statusCode) } @@ -69,6 +70,7 @@ func (cli *Client) CopyFromContainer(ctx context.Context, containerID, srcPath s return nil, types.ContainerPathStat{}, wrapResponseError(err, response, "container:path", containerID+":"+srcPath) } + // TODO this code converts non-error status-codes (e.g., "204 No Content") into an error; verify if this is the desired behavior if response.statusCode != http.StatusOK { return nil, types.ContainerPathStat{}, fmt.Errorf("unexpected status code from daemon: %d", response.statusCode) } diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/container_create.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/container_create.go index d269a6189..5b795e0c1 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/container_create.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/container_create.go @@ -4,7 +4,6 @@ import ( "context" "encoding/json" "net/url" - "strings" "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/network" @@ -43,14 +42,11 @@ func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config } serverResp, err := cli.post(ctx, "/containers/create", query, body, nil) + defer ensureReaderClosed(serverResp) if err != nil { - if serverResp.statusCode == 404 && strings.Contains(err.Error(), "No such image") { - return response, objectNotFoundError{object: "image", id: config.Image} - } return response, err } err = json.NewDecoder(serverResp.body).Decode(&response) - ensureReaderClosed(serverResp) return response, 
err } diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/container_diff.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/container_diff.go index 3b7c90c96..29dac8491 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/container_diff.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/container_diff.go @@ -13,11 +13,11 @@ func (cli *Client) ContainerDiff(ctx context.Context, containerID string) ([]con var changes []container.ContainerChangeResponseItem serverResp, err := cli.get(ctx, "/containers/"+containerID+"/changes", url.Values{}, nil) + defer ensureReaderClosed(serverResp) if err != nil { return changes, err } err = json.NewDecoder(serverResp.body).Decode(&changes) - ensureReaderClosed(serverResp) return changes, err } diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/container_exec.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/container_exec.go index 535536b1e..e3ee755b7 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/container_exec.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/container_exec.go @@ -16,11 +16,11 @@ func (cli *Client) ContainerExecCreate(ctx context.Context, container string, co } resp, err := cli.post(ctx, "/containers/"+container+"/exec", nil, config, nil) + defer ensureReaderClosed(resp) if err != nil { return response, err } err = json.NewDecoder(resp.body).Decode(&response) - ensureReaderClosed(resp) return response, err } diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/container_inspect.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/container_inspect.go index f453064cf..c496bcffe 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/container_inspect.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/container_inspect.go @@ -16,13 +16,13 @@ func (cli *Client) ContainerInspect(ctx context.Context, containerID string) (ty return types.ContainerJSON{}, objectNotFoundError{object: "container", id: containerID} } serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", nil, nil) + defer ensureReaderClosed(serverResp) if err != nil { return types.ContainerJSON{}, wrapResponseError(err, serverResp, "container", containerID) } var response types.ContainerJSON err = json.NewDecoder(serverResp.body).Decode(&response) - ensureReaderClosed(serverResp) return response, err } @@ -36,10 +36,10 @@ func (cli *Client) ContainerInspectWithRaw(ctx context.Context, containerID stri query.Set("size", "1") } serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", query, nil) + defer ensureReaderClosed(serverResp) if err != nil { return types.ContainerJSON{}, nil, wrapResponseError(err, serverResp, "container", containerID) } - defer ensureReaderClosed(serverResp) body, err := ioutil.ReadAll(serverResp.body) if err != nil { diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/container_list.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/container_list.go index 9c218e221..1e7a63a9c 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/container_list.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/container_list.go @@ -45,12 +45,12 @@ func (cli *Client) ContainerList(ctx context.Context, options types.ContainerLis } resp, err := cli.get(ctx, "/containers/json", query, nil) + defer ensureReaderClosed(resp) if err != nil { return nil, err } var containers []types.Container err = 
json.NewDecoder(resp.body).Decode(&containers) - ensureReaderClosed(resp) return containers, err } diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/container_prune.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/container_prune.go index 14f88d93b..04383deaa 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/container_prune.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/container_prune.go @@ -23,10 +23,10 @@ func (cli *Client) ContainersPrune(ctx context.Context, pruneFilters filters.Arg } serverResp, err := cli.post(ctx, "/containers/prune", query, nil, nil) + defer ensureReaderClosed(serverResp) if err != nil { return report, err } - defer ensureReaderClosed(serverResp) if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { return report, fmt.Errorf("Error retrieving disk usage: %v", err) diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/container_remove.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/container_remove.go index ab4cfc16f..df81461b8 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/container_remove.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/container_remove.go @@ -22,6 +22,6 @@ func (cli *Client) ContainerRemove(ctx context.Context, containerID string, opti } resp, err := cli.delete(ctx, "/containers/"+containerID, query, nil) - ensureReaderClosed(resp) + defer ensureReaderClosed(resp) return wrapResponseError(err, resp, "container", containerID) } diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/container_top.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/container_top.go index 9c9fce7a0..a5b78999b 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/container_top.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/container_top.go @@ -18,11 +18,11 @@ func (cli *Client) ContainerTop(ctx context.Context, containerID string, argumen } resp, err := cli.get(ctx, "/containers/"+containerID+"/top", query, nil) + defer ensureReaderClosed(resp) if err != nil { return response, err } err = json.NewDecoder(resp.body).Decode(&response) - ensureReaderClosed(resp) return response, err } diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/container_update.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/container_update.go index 14e7f23df..6917cf9fb 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/container_update.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/container_update.go @@ -11,12 +11,11 @@ import ( func (cli *Client) ContainerUpdate(ctx context.Context, containerID string, updateConfig container.UpdateConfig) (container.ContainerUpdateOKBody, error) { var response container.ContainerUpdateOKBody serverResp, err := cli.post(ctx, "/containers/"+containerID+"/update", nil, updateConfig, nil) + defer ensureReaderClosed(serverResp) if err != nil { return response, err } err = json.NewDecoder(serverResp.body).Decode(&response) - - ensureReaderClosed(serverResp) return response, err } diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/disk_usage.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/disk_usage.go index 8eb30eb5d..354cd3693 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/disk_usage.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/disk_usage.go @@ -13,10 +13,10 @@ func (cli *Client) DiskUsage(ctx context.Context) 
(types.DiskUsage, error) { var du types.DiskUsage serverResp, err := cli.get(ctx, "/system/df", nil, nil) + defer ensureReaderClosed(serverResp) if err != nil { return du, err } - defer ensureReaderClosed(serverResp) if err := json.NewDecoder(serverResp.body).Decode(&du); err != nil { return du, fmt.Errorf("Error retrieving disk usage: %v", err) diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/distribution_inspect.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/distribution_inspect.go index 7245bbeed..f4e3794cb 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/distribution_inspect.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/distribution_inspect.go @@ -28,11 +28,11 @@ func (cli *Client) DistributionInspect(ctx context.Context, image, encodedRegist } resp, err := cli.get(ctx, "/distribution/"+image+"/json", url.Values{}, headers) + defer ensureReaderClosed(resp) if err != nil { return distributionInspect, err } err = json.NewDecoder(resp.body).Decode(&distributionInspect) - ensureReaderClosed(resp) return distributionInspect, err } diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/errors.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/errors.go index 0461af329..001c10288 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/errors.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/errors.go @@ -5,6 +5,7 @@ import ( "net/http" "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/errdefs" "github.com/pkg/errors" ) @@ -32,16 +33,19 @@ func ErrorConnectionFailed(host string) error { return errConnectionFailed{host: host} } +// Deprecated: use the errdefs.NotFound() interface instead. Kept for backward compatibility type notFound interface { error - NotFound() bool // Is the error a NotFound error + NotFound() bool } // IsErrNotFound returns true if the error is a NotFound error, which is returned // by the API when some object is not found. func IsErrNotFound(err error) bool { - te, ok := err.(notFound) - return ok && te.NotFound() + if _, ok := err.(notFound); ok { + return ok + } + return errdefs.IsNotFound(err) } type objectNotFoundError struct { @@ -49,9 +53,7 @@ type objectNotFoundError struct { id string } -func (e objectNotFoundError) NotFound() bool { - return true -} +func (e objectNotFoundError) NotFound() {} func (e objectNotFoundError) Error() string { return fmt.Sprintf("Error: No such %s: %s", e.object, e.id) @@ -64,7 +66,7 @@ func wrapResponseError(err error, resp serverResponse, object, id string) error case resp.statusCode == http.StatusNotFound: return objectNotFoundError{object: object, id: id} case resp.statusCode == http.StatusNotImplemented: - return notImplementedError{message: err.Error()} + return errdefs.NotImplemented(err) default: return err } @@ -83,8 +85,10 @@ func (u unauthorizedError) Error() string { // IsErrUnauthorized returns true if the error is caused // when a remote registry authentication fails func IsErrUnauthorized(err error) bool { - _, ok := err.(unauthorizedError) - return ok + if _, ok := err.(unauthorizedError); ok { + return ok + } + return errdefs.IsUnauthorized(err) } type pluginPermissionDenied struct { @@ -118,8 +122,10 @@ func (e notImplementedError) NotImplemented() bool { // This is returned by the API when a requested feature has not been // implemented. 
func IsErrNotImplemented(err error) bool { - te, ok := err.(notImplementedError) - return ok && te.NotImplemented() + if _, ok := err.(notImplementedError); ok { + return ok + } + return errdefs.IsNotImplemented(err) } // NewVersionError returns an error if the APIVersion required diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/hijack.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/hijack.go index 35f5dd86d..e9c9a752f 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/hijack.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/hijack.go @@ -23,14 +23,14 @@ func (cli *Client) postHijacked(ctx context.Context, path string, query url.Valu return types.HijackedResponse{}, err } - apiPath := cli.getAPIPath(path, query) + apiPath := cli.getAPIPath(ctx, path, query) req, err := http.NewRequest("POST", apiPath, bodyEncoded) if err != nil { return types.HijackedResponse{}, err } req = cli.addHeaders(req, headers) - conn, err := cli.setupHijackConn(req, "tcp") + conn, err := cli.setupHijackConn(ctx, req, "tcp") if err != nil { return types.HijackedResponse{}, err } @@ -38,7 +38,20 @@ func (cli *Client) postHijacked(ctx context.Context, path string, query url.Valu return types.HijackedResponse{Conn: conn, Reader: bufio.NewReader(conn)}, err } -func dial(proto, addr string, tlsConfig *tls.Config) (net.Conn, error) { +// DialHijack returns a hijacked connection with negotiated protocol proto. +func (cli *Client) DialHijack(ctx context.Context, url, proto string, meta map[string][]string) (net.Conn, error) { + req, err := http.NewRequest("POST", url, nil) + if err != nil { + return nil, err + } + req = cli.addHeaders(req, meta) + + return cli.setupHijackConn(ctx, req, proto) +} + +// fallbackDial is used when WithDialer() was not called. +// See cli.Dialer(). +func fallbackDial(proto, addr string, tlsConfig *tls.Config) (net.Conn, error) { if tlsConfig != nil && proto != "unix" && proto != "npipe" { return tls.Dial(proto, addr, tlsConfig) } @@ -48,12 +61,13 @@ func dial(proto, addr string, tlsConfig *tls.Config) (net.Conn, error) { return net.Dial(proto, addr) } -func (cli *Client) setupHijackConn(req *http.Request, proto string) (net.Conn, error) { +func (cli *Client) setupHijackConn(ctx context.Context, req *http.Request, proto string) (net.Conn, error) { req.Host = cli.addr req.Header.Set("Connection", "Upgrade") req.Header.Set("Upgrade", proto) - conn, err := dial(cli.proto, cli.addr, resolveTLSConfig(cli.client.Transport)) + dialer := cli.Dialer() + conn, err := dialer(ctx) if err != nil { return nil, errors.Wrap(err, "cannot connect to the Docker daemon. 
Is 'docker daemon' running on this host?") } diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/image_build.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/image_build.go index 9add3c10b..8fcf99503 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/image_build.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/image_build.go @@ -134,5 +134,13 @@ func (cli *Client) imageBuildOptionsToQuery(options types.ImageBuildOptions) (ur query.Set("buildid", options.BuildID) } query.Set("version", string(options.Version)) + + if options.Outputs != nil { + outputsJSON, err := json.Marshal(options.Outputs) + if err != nil { + return query, err + } + query.Set("outputs", string(outputsJSON)) + } return query, nil } diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/image_history.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/image_history.go index 0151b9517..b5bea10d8 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/image_history.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/image_history.go @@ -12,11 +12,11 @@ import ( func (cli *Client) ImageHistory(ctx context.Context, imageID string) ([]image.HistoryResponseItem, error) { var history []image.HistoryResponseItem serverResp, err := cli.get(ctx, "/images/"+imageID+"/history", url.Values{}, nil) + defer ensureReaderClosed(serverResp) if err != nil { return history, err } err = json.NewDecoder(serverResp.body).Decode(&history) - ensureReaderClosed(serverResp) return history, err } diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/image_inspect.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/image_inspect.go index 2f8f6d2f1..1eb8dce02 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/image_inspect.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/image_inspect.go @@ -15,10 +15,10 @@ func (cli *Client) ImageInspectWithRaw(ctx context.Context, imageID string) (typ return types.ImageInspect{}, nil, objectNotFoundError{object: "image", id: imageID} } serverResp, err := cli.get(ctx, "/images/"+imageID+"/json", nil, nil) + defer ensureReaderClosed(serverResp) if err != nil { return types.ImageInspect{}, nil, wrapResponseError(err, serverResp, "image", imageID) } - defer ensureReaderClosed(serverResp) body, err := ioutil.ReadAll(serverResp.body) if err != nil { diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/image_list.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/image_list.go index 32fae27b3..4fa8c006b 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/image_list.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/image_list.go @@ -35,11 +35,11 @@ func (cli *Client) ImageList(ctx context.Context, options types.ImageListOptions } serverResp, err := cli.get(ctx, "/images/json", query, nil) + defer ensureReaderClosed(serverResp) if err != nil { return images, err } err = json.NewDecoder(serverResp.body).Decode(&images) - ensureReaderClosed(serverResp) return images, err } diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/image_prune.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/image_prune.go index 78ee3f6c4..56af6d7f9 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/image_prune.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/image_prune.go @@ -23,10 +23,10 @@ func (cli *Client) ImagesPrune(ctx context.Context, pruneFilters 
filters.Args) ( } serverResp, err := cli.post(ctx, "/images/prune", query, nil, nil) + defer ensureReaderClosed(serverResp) if err != nil { return report, err } - defer ensureReaderClosed(serverResp) if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { return report, fmt.Errorf("Error retrieving disk usage: %v", err) diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/image_pull.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/image_pull.go index d97aacf8c..a23975591 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/image_pull.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/image_pull.go @@ -3,12 +3,12 @@ package client // import "github.com/docker/docker/client" import ( "context" "io" - "net/http" "net/url" "strings" "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" + "github.com/docker/docker/errdefs" ) // ImagePull requests the docker host to pull an image from a remote registry. @@ -35,7 +35,7 @@ func (cli *Client) ImagePull(ctx context.Context, refStr string, options types.I } resp, err := cli.tryImageCreate(ctx, query, options.RegistryAuth) - if resp.statusCode == http.StatusUnauthorized && options.PrivilegeFunc != nil { + if errdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil { newAuthHeader, privilegeErr := options.PrivilegeFunc() if privilegeErr != nil { return nil, privilegeErr diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/image_push.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/image_push.go index a15871c2b..49d412ee3 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/image_push.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/image_push.go @@ -4,11 +4,11 @@ import ( "context" "errors" "io" - "net/http" "net/url" "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" + "github.com/docker/docker/errdefs" ) // ImagePush requests the docker host to push an image to a remote registry. 
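The image_pull.go hunk above replaces the raw `resp.statusCode == http.StatusUnauthorized` check with `errdefs.IsUnauthorized(err)`, which also matches errors that arrive already classified by the errdefs package. A minimal caller-side sketch of the retry hook, assuming a configured *client.Client; promptForRegistryAuth is a hypothetical credential helper, not part of this patch:

    package example

    import (
        "context"
        "io"
        "os"

        "github.com/docker/docker/api/types"
        "github.com/docker/docker/client"
    )

    // promptForRegistryAuth is a hypothetical helper; a real caller would
    // return a fresh base64-encoded types.AuthConfig here.
    func promptForRegistryAuth() (string, error) {
        return "", nil
    }

    // pullWithAuthRetry pulls ref; when the first attempt fails with an
    // unauthorized error (per errdefs.IsUnauthorized), ImagePull invokes
    // PrivilegeFunc for a new auth header and retries internally.
    func pullWithAuthRetry(ctx context.Context, cli *client.Client, ref string) error {
        rc, err := cli.ImagePull(ctx, ref, types.ImagePullOptions{
            PrivilegeFunc: promptForRegistryAuth,
        })
        if err != nil {
            return err
        }
        defer rc.Close()
        _, err = io.Copy(os.Stdout, rc) // stream the pull-progress JSON
        return err
    }
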
@@ -36,7 +36,7 @@ func (cli *Client) ImagePush(ctx context.Context, image string, options types.Im query.Set("tag", tag) resp, err := cli.tryImagePush(ctx, name, query, options.RegistryAuth) - if resp.statusCode == http.StatusUnauthorized && options.PrivilegeFunc != nil { + if errdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil { newAuthHeader, privilegeErr := options.PrivilegeFunc() if privilegeErr != nil { return nil, privilegeErr diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/image_remove.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/image_remove.go index 45d6e6f0d..84a41af0f 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/image_remove.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/image_remove.go @@ -21,11 +21,11 @@ func (cli *Client) ImageRemove(ctx context.Context, imageID string, options type var dels []types.ImageDeleteResponseItem resp, err := cli.delete(ctx, "/images/"+imageID, query, nil) + defer ensureReaderClosed(resp) if err != nil { return dels, wrapResponseError(err, resp, "image", imageID) } err = json.NewDecoder(resp.body).Decode(&dels) - ensureReaderClosed(resp) return dels, err } diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/image_search.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/image_search.go index 176de3c58..82955a747 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/image_search.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/image_search.go @@ -4,12 +4,12 @@ import ( "context" "encoding/json" "fmt" - "net/http" "net/url" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/errdefs" ) // ImageSearch makes the docker host to search by a term in a remote registry. 
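Since image_remove.go (above) wraps its delete error via wrapResponseError, and errors.go now delegates to errdefs, a missing image surfaces as a typed not-found error that callers can treat as a no-op. A short sketch, assuming a configured *client.Client; the function name is illustrative:

    package example

    import (
        "context"

        "github.com/docker/docker/api/types"
        "github.com/docker/docker/client"
    )

    // removeImageIfPresent deletes an image but tolerates its absence;
    // client.IsErrNotFound matches both the legacy notFound interface and
    // errors classified via errdefs.IsNotFound.
    func removeImageIfPresent(ctx context.Context, cli *client.Client, id string) error {
        _, err := cli.ImageRemove(ctx, id, types.ImageRemoveOptions{})
        if client.IsErrNotFound(err) {
            return nil // already gone; treat as success
        }
        return err
    }
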
@@ -29,7 +29,8 @@ func (cli *Client) ImageSearch(ctx context.Context, term string, options types.I } resp, err := cli.tryImageSearch(ctx, query, options.RegistryAuth) - if resp.statusCode == http.StatusUnauthorized && options.PrivilegeFunc != nil { + defer ensureReaderClosed(resp) + if errdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil { newAuthHeader, privilegeErr := options.PrivilegeFunc() if privilegeErr != nil { return results, privilegeErr @@ -41,7 +42,6 @@ func (cli *Client) ImageSearch(ctx context.Context, term string, options types.I } err = json.NewDecoder(resp.body).Decode(&results) - ensureReaderClosed(resp) return results, err } diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/info.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/info.go index 121f256ab..c856704e2 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/info.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/info.go @@ -13,10 +13,10 @@ import ( func (cli *Client) Info(ctx context.Context) (types.Info, error) { var info types.Info serverResp, err := cli.get(ctx, "/info", url.Values{}, nil) + defer ensureReaderClosed(serverResp) if err != nil { return info, err } - defer ensureReaderClosed(serverResp) if err := json.NewDecoder(serverResp.body).Decode(&info); err != nil { return info, fmt.Errorf("Error reading remote info: %v", err) diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/interface.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/interface.go index 9250c468a..cde64be4b 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/interface.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/interface.go @@ -38,7 +38,8 @@ type CommonAPIClient interface { ServerVersion(ctx context.Context) (types.Version, error) NegotiateAPIVersion(ctx context.Context) NegotiateAPIVersionPing(types.Ping) - DialSession(ctx context.Context, proto string, meta map[string][]string) (net.Conn, error) + DialHijack(ctx context.Context, url, proto string, meta map[string][]string) (net.Conn, error) + Dialer() func(context.Context) (net.Conn, error) Close() error } @@ -85,7 +86,7 @@ type DistributionAPIClient interface { // ImageAPIClient defines API client methods for the images type ImageAPIClient interface { ImageBuild(ctx context.Context, context io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) - BuildCachePrune(ctx context.Context) (*types.BuildCachePruneReport, error) + BuildCachePrune(ctx context.Context, opts types.BuildCachePruneOptions) (*types.BuildCachePruneReport, error) BuildCancel(ctx context.Context, id string) error ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error) ImageHistory(ctx context.Context, image string) ([]image.HistoryResponseItem, error) diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/login.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/login.go index 7d6618190..f05852063 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/login.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/login.go @@ -3,7 +3,6 @@ package client // import "github.com/docker/docker/client" import ( "context" "encoding/json" - "net/http" "net/url" "github.com/docker/docker/api/types" @@ -14,16 +13,13 @@ import ( // It returns unauthorizedError when the authentication fails. 
func (cli *Client) RegistryLogin(ctx context.Context, auth types.AuthConfig) (registry.AuthenticateOKBody, error) { resp, err := cli.post(ctx, "/auth", url.Values{}, auth, nil) + defer ensureReaderClosed(resp) - if resp.statusCode == http.StatusUnauthorized { - return registry.AuthenticateOKBody{}, unauthorizedError{err} - } if err != nil { return registry.AuthenticateOKBody{}, err } var response registry.AuthenticateOKBody err = json.NewDecoder(resp.body).Decode(&response) - ensureReaderClosed(resp) return response, err } diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/network_create.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/network_create.go index 41da2ac61..278d9383a 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/network_create.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/network_create.go @@ -15,11 +15,11 @@ func (cli *Client) NetworkCreate(ctx context.Context, name string, options types } var response types.NetworkCreateResponse serverResp, err := cli.post(ctx, "/networks/create", nil, networkCreateRequest, nil) + defer ensureReaderClosed(serverResp) if err != nil { return response, err } - json.NewDecoder(serverResp.body).Decode(&response) - ensureReaderClosed(serverResp) + err = json.NewDecoder(serverResp.body).Decode(&response) return response, err } diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/network_inspect.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/network_inspect.go index 025f6d875..89a05b302 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/network_inspect.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/network_inspect.go @@ -34,10 +34,10 @@ func (cli *Client) NetworkInspectWithRaw(ctx context.Context, networkID string, query.Set("scope", options.Scope) } resp, err = cli.get(ctx, "/networks/"+networkID, query, nil) + defer ensureReaderClosed(resp) if err != nil { return networkResource, nil, wrapResponseError(err, resp, "network", networkID) } - defer ensureReaderClosed(resp) body, err := ioutil.ReadAll(resp.body) if err != nil { diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/network_list.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/network_list.go index f16b2f562..7130c1364 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/network_list.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/network_list.go @@ -22,10 +22,10 @@ func (cli *Client) NetworkList(ctx context.Context, options types.NetworkListOpt } var networkResources []types.NetworkResource resp, err := cli.get(ctx, "/networks", query, nil) + defer ensureReaderClosed(resp) if err != nil { return networkResources, err } err = json.NewDecoder(resp.body).Decode(&networkResources) - ensureReaderClosed(resp) return networkResources, err } diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/network_prune.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/network_prune.go index 6418b8b60..cebb18821 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/network_prune.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/network_prune.go @@ -23,10 +23,10 @@ func (cli *Client) NetworksPrune(ctx context.Context, pruneFilters filters.Args) } serverResp, err := cli.post(ctx, "/networks/prune", query, nil, nil) + defer ensureReaderClosed(serverResp) if err != nil { return report, err } - defer ensureReaderClosed(serverResp) if err := 
json.NewDecoder(serverResp.body).Decode(&report); err != nil { return report, fmt.Errorf("Error retrieving network prune report: %v", err) diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/network_remove.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/network_remove.go index 12741437b..e71b16d86 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/network_remove.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/network_remove.go @@ -5,6 +5,6 @@ import "context" // NetworkRemove removes an existent network from the docker host. func (cli *Client) NetworkRemove(ctx context.Context, networkID string) error { resp, err := cli.delete(ctx, "/networks/"+networkID, nil, nil) - ensureReaderClosed(resp) + defer ensureReaderClosed(resp) return wrapResponseError(err, resp, "network", networkID) } diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/node_inspect.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/node_inspect.go index 593b2e9f0..d296c9fdd 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/node_inspect.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/node_inspect.go @@ -15,10 +15,10 @@ func (cli *Client) NodeInspectWithRaw(ctx context.Context, nodeID string) (swarm return swarm.Node{}, nil, objectNotFoundError{object: "node", id: nodeID} } serverResp, err := cli.get(ctx, "/nodes/"+nodeID, nil, nil) + defer ensureReaderClosed(serverResp) if err != nil { return swarm.Node{}, nil, wrapResponseError(err, serverResp, "node", nodeID) } - defer ensureReaderClosed(serverResp) body, err := ioutil.ReadAll(serverResp.body) if err != nil { diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/node_list.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/node_list.go index 9883f6fc5..c212906bc 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/node_list.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/node_list.go @@ -25,12 +25,12 @@ func (cli *Client) NodeList(ctx context.Context, options types.NodeListOptions) } resp, err := cli.get(ctx, "/nodes", query, nil) + defer ensureReaderClosed(resp) if err != nil { return nil, err } var nodes []swarm.Node err = json.NewDecoder(resp.body).Decode(&nodes) - ensureReaderClosed(resp) return nodes, err } diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/node_remove.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/node_remove.go index e7a750571..03ab87809 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/node_remove.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/node_remove.go @@ -15,6 +15,6 @@ func (cli *Client) NodeRemove(ctx context.Context, nodeID string, options types. 
} resp, err := cli.delete(ctx, "/nodes/"+nodeID, query, nil) - ensureReaderClosed(resp) + defer ensureReaderClosed(resp) return wrapResponseError(err, resp, "node", nodeID) } diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/options.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/options.go new file mode 100644 index 000000000..6f77f0955 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/options.go @@ -0,0 +1,172 @@ +package client + +import ( + "context" + "net" + "net/http" + "os" + "path/filepath" + "time" + + "github.com/docker/go-connections/sockets" + "github.com/docker/go-connections/tlsconfig" + "github.com/pkg/errors" +) + +// Opt is a configuration option to initialize a client +type Opt func(*Client) error + +// FromEnv configures the client with values from environment variables. +// +// Supported environment variables: +// DOCKER_HOST to set the url to the docker server. +// DOCKER_API_VERSION to set the version of the API to reach, leave empty for latest. +// DOCKER_CERT_PATH to load the TLS certificates from. +// DOCKER_TLS_VERIFY to enable or disable TLS verification, off by default. +func FromEnv(c *Client) error { + if dockerCertPath := os.Getenv("DOCKER_CERT_PATH"); dockerCertPath != "" { + options := tlsconfig.Options{ + CAFile: filepath.Join(dockerCertPath, "ca.pem"), + CertFile: filepath.Join(dockerCertPath, "cert.pem"), + KeyFile: filepath.Join(dockerCertPath, "key.pem"), + InsecureSkipVerify: os.Getenv("DOCKER_TLS_VERIFY") == "", + } + tlsc, err := tlsconfig.Client(options) + if err != nil { + return err + } + + c.client = &http.Client{ + Transport: &http.Transport{TLSClientConfig: tlsc}, + CheckRedirect: CheckRedirect, + } + } + + if host := os.Getenv("DOCKER_HOST"); host != "" { + if err := WithHost(host)(c); err != nil { + return err + } + } + + if version := os.Getenv("DOCKER_API_VERSION"); version != "" { + if err := WithVersion(version)(c); err != nil { + return err + } + } + return nil +} + +// WithDialer applies the dialer.DialContext to the client transport. This can be +// used to set the Timeout and KeepAlive settings of the client. +// Deprecated: use WithDialContext +func WithDialer(dialer *net.Dialer) Opt { + return WithDialContext(dialer.DialContext) +} + +// WithDialContext applies the dialer to the client transport. This can be +// used to set the Timeout and KeepAlive settings of the client. +func WithDialContext(dialContext func(ctx context.Context, network, addr string) (net.Conn, error)) Opt { + return func(c *Client) error { + if transport, ok := c.client.Transport.(*http.Transport); ok { + transport.DialContext = dialContext + return nil + } + return errors.Errorf("cannot apply dialer to transport: %T", c.client.Transport) + } +} + +// WithHost overrides the client host with the specified one. 
+func WithHost(host string) Opt { + return func(c *Client) error { + hostURL, err := ParseHostURL(host) + if err != nil { + return err + } + c.host = host + c.proto = hostURL.Scheme + c.addr = hostURL.Host + c.basePath = hostURL.Path + if transport, ok := c.client.Transport.(*http.Transport); ok { + return sockets.ConfigureTransport(transport, c.proto, c.addr) + } + return errors.Errorf("cannot apply host to transport: %T", c.client.Transport) + } +} + +// WithHTTPClient overrides the client http client with the specified one +func WithHTTPClient(client *http.Client) Opt { + return func(c *Client) error { + if client != nil { + c.client = client + } + return nil + } +} + +// WithTimeout configures the time limit for requests made by the HTTP client +func WithTimeout(timeout time.Duration) Opt { + return func(c *Client) error { + c.client.Timeout = timeout + return nil + } +} + +// WithHTTPHeaders overrides the client default http headers +func WithHTTPHeaders(headers map[string]string) Opt { + return func(c *Client) error { + c.customHTTPHeaders = headers + return nil + } +} + +// WithScheme overrides the client scheme with the specified one +func WithScheme(scheme string) Opt { + return func(c *Client) error { + c.scheme = scheme + return nil + } +} + +// WithTLSClientConfig applies a tls config to the client transport. +func WithTLSClientConfig(cacertPath, certPath, keyPath string) Opt { + return func(c *Client) error { + opts := tlsconfig.Options{ + CAFile: cacertPath, + CertFile: certPath, + KeyFile: keyPath, + ExclusiveRootPools: true, + } + config, err := tlsconfig.Client(opts) + if err != nil { + return errors.Wrap(err, "failed to create tls config") + } + if transport, ok := c.client.Transport.(*http.Transport); ok { + transport.TLSClientConfig = config + return nil + } + return errors.Errorf("cannot apply tls config to transport: %T", c.client.Transport) + } +} + +// WithVersion overrides the client version with the specified one. If an empty +// version is specified, the value will be ignored to allow version negotiation. +func WithVersion(version string) Opt { + return func(c *Client) error { + if version != "" { + c.version = version + c.manualOverride = true + } + return nil + } +} + +// WithAPIVersionNegotiation enables automatic API version negotiation for the client. +// With this option enabled, the client automatically negotiates the API version +// to use when making requests. API version negotiation is performed on the first +// request; subsequent requests will not re-negotiate. +func WithAPIVersionNegotiation() Opt { + return func(c *Client) error { + c.negotiateVersion = true + return nil + } +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/ping.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/ping.go index 85d38adb5..90f39ec14 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/ping.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/ping.go @@ -2,31 +2,65 @@ package client // import "github.com/docker/docker/client" import ( "context" + "net/http" "path" "github.com/docker/docker/api/types" + "github.com/docker/docker/errdefs" ) -// Ping pings the server and returns the value of the "Docker-Experimental", "OS-Type" & "API-Version" headers +// Ping pings the server and returns the value of the "Docker-Experimental", +// "Builder-Version", "OS-Type" & "API-Version" headers. It attempts to use +// a HEAD request on the endpoint, but falls back to GET if HEAD is not supported +// by the daemon. 
func (cli *Client) Ping(ctx context.Context) (types.Ping, error) { var ping types.Ping - req, err := cli.buildRequest("GET", path.Join(cli.basePath, "/_ping"), nil, nil) + + // Using cli.buildRequest() + cli.doRequest() instead of cli.sendRequest() + // because ping requests are used during API version negotiation, so we want + // to hit the non-versioned /_ping endpoint, not /v1.xx/_ping + req, err := cli.buildRequest("HEAD", path.Join(cli.basePath, "/_ping"), nil, nil) if err != nil { return ping, err } serverResp, err := cli.doRequest(ctx, req) + if err == nil { + defer ensureReaderClosed(serverResp) + switch serverResp.statusCode { + case http.StatusOK, http.StatusInternalServerError: + // Server handled the request, so parse the response + return parsePingResponse(cli, serverResp) + } + } else if IsErrConnectionFailed(err) { + return ping, err + } + + req, err = cli.buildRequest("GET", path.Join(cli.basePath, "/_ping"), nil, nil) if err != nil { return ping, err } + serverResp, err = cli.doRequest(ctx, req) defer ensureReaderClosed(serverResp) - - if serverResp.header != nil { - ping.APIVersion = serverResp.header.Get("API-Version") - - if serverResp.header.Get("Docker-Experimental") == "true" { - ping.Experimental = true - } - ping.OSType = serverResp.header.Get("OSType") + if err != nil { + return ping, err } - return ping, cli.checkResponseErr(serverResp) + return parsePingResponse(cli, serverResp) +} + +func parsePingResponse(cli *Client, resp serverResponse) (types.Ping, error) { + var ping types.Ping + if resp.header == nil { + err := cli.checkResponseErr(resp) + return ping, errdefs.FromStatusCode(err, resp.statusCode) + } + ping.APIVersion = resp.header.Get("API-Version") + ping.OSType = resp.header.Get("OSType") + if resp.header.Get("Docker-Experimental") == "true" { + ping.Experimental = true + } + if bv := resp.header.Get("Builder-Version"); bv != "" { + ping.BuilderVersion = types.BuilderVersion(bv) + } + err := cli.checkResponseErr(resp) + return ping, errdefs.FromStatusCode(err, resp.statusCode) } diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/plugin_create.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/plugin_create.go index 4591db50f..b95dbaf68 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/plugin_create.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/plugin_create.go @@ -18,9 +18,6 @@ func (cli *Client) PluginCreate(ctx context.Context, createContext io.Reader, cr query.Set("name", createOptions.RepoName) resp, err := cli.postRaw(ctx, "/plugins/create", query, createContext, headers) - if err != nil { - return err - } ensureReaderClosed(resp) return err } diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/plugin_inspect.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/plugin_inspect.go index 0ab7beaee..81b89732b 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/plugin_inspect.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/plugin_inspect.go @@ -15,11 +15,11 @@ func (cli *Client) PluginInspectWithRaw(ctx context.Context, name string) (*type return nil, nil, objectNotFoundError{object: "plugin", id: name} } resp, err := cli.get(ctx, "/plugins/"+name+"/json", nil, nil) + defer ensureReaderClosed(resp) if err != nil { return nil, nil, wrapResponseError(err, resp, "plugin", name) } - defer ensureReaderClosed(resp) body, err := ioutil.ReadAll(resp.body) if err != nil { return nil, nil, err diff --git 
a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/plugin_install.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/plugin_install.go index 13baa40a9..012afe61c 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/plugin_install.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/plugin_install.go @@ -4,11 +4,11 @@ import ( "context" "encoding/json" "io" - "net/http" "net/url" "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" + "github.com/docker/docker/errdefs" "github.com/pkg/errors" ) @@ -78,7 +78,7 @@ func (cli *Client) tryPluginPull(ctx context.Context, query url.Values, privileg func (cli *Client) checkPluginPermissions(ctx context.Context, query url.Values, options types.PluginInstallOptions) (types.PluginPrivileges, error) { resp, err := cli.tryPluginPrivileges(ctx, query, options.RegistryAuth) - if resp.statusCode == http.StatusUnauthorized && options.PrivilegeFunc != nil { + if errdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil { // todo: do inspect before to check existing name before checking privileges newAuthHeader, privilegeErr := options.PrivilegeFunc() if privilegeErr != nil { diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/plugin_list.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/plugin_list.go index ade1051a9..8285cecd6 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/plugin_list.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/plugin_list.go @@ -22,11 +22,11 @@ func (cli *Client) PluginList(ctx context.Context, filter filters.Args) (types.P query.Set("filters", filterJSON) } resp, err := cli.get(ctx, "/plugins", query, nil) + defer ensureReaderClosed(resp) if err != nil { return plugins, wrapResponseError(err, resp, "plugin", "") } err = json.NewDecoder(resp.body).Decode(&plugins) - ensureReaderClosed(resp) return plugins, err } diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/plugin_remove.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/plugin_remove.go index 8563bab0d..51ca1040d 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/plugin_remove.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/plugin_remove.go @@ -15,6 +15,6 @@ func (cli *Client) PluginRemove(ctx context.Context, name string, options types. } resp, err := cli.delete(ctx, "/plugins/"+name, query, nil) - ensureReaderClosed(resp) + defer ensureReaderClosed(resp) return wrapResponseError(err, resp, "plugin", name) } diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/request.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/request.go index a19d62aa5..2610338da 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/request.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/request.go @@ -15,8 +15,8 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/errdefs" "github.com/pkg/errors" - "golang.org/x/net/context/ctxhttp" ) // serverResponse is a wrapper for http API responses. 
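The functional options added above in options.go pair with the HEAD-based /_ping probe in ping.go to enable lazy API-version negotiation. A sketch of typical construction, assuming the vendored client's NewClientWithOpts constructor (defined in client.go, outside these hunks):

    package example

    import (
        "context"
        "fmt"

        "github.com/docker/docker/client"
    )

    // newNegotiatedClient builds a client from the environment and lets the
    // first request negotiate the API version via the /_ping endpoint.
    func newNegotiatedClient(ctx context.Context) (*client.Client, error) {
        cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
        if err != nil {
            return nil, err
        }
        ping, err := cli.Ping(ctx)
        if err != nil {
            cli.Close()
            return nil, err
        }
        fmt.Println("negotiated API version:", cli.ClientVersion(), "builder:", ping.BuilderVersion)
        return cli, nil
    }
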
@@ -115,28 +115,30 @@ func (cli *Client) buildRequest(method, path string, body io.Reader, headers hea } func (cli *Client) sendRequest(ctx context.Context, method, path string, query url.Values, body io.Reader, headers headers) (serverResponse, error) { - req, err := cli.buildRequest(method, cli.getAPIPath(path, query), body, headers) + req, err := cli.buildRequest(method, cli.getAPIPath(ctx, path, query), body, headers) if err != nil { return serverResponse{}, err } resp, err := cli.doRequest(ctx, req) if err != nil { - return resp, err + return resp, errdefs.FromStatusCode(err, resp.statusCode) } - return resp, cli.checkResponseErr(resp) + err = cli.checkResponseErr(resp) + return resp, errdefs.FromStatusCode(err, resp.statusCode) } func (cli *Client) doRequest(ctx context.Context, req *http.Request) (serverResponse, error) { serverResp := serverResponse{statusCode: -1, reqURL: req.URL} - resp, err := ctxhttp.Do(ctx, cli.client, req) + req = req.WithContext(ctx) + resp, err := cli.client.Do(req) if err != nil { if cli.scheme != "https" && strings.Contains(err.Error(), "malformed HTTP response") { return serverResp, fmt.Errorf("%v.\n* Are you trying to connect to a TLS-enabled daemon without TLS?", err) } if cli.scheme == "https" && strings.Contains(err.Error(), "bad certificate") { - return serverResp, fmt.Errorf("The server probably has client authentication (--tlsverify) enabled. Please check your TLS client certification settings: %v", err) + return serverResp, errors.Wrap(err, "The server probably has client authentication (--tlsverify) enabled. Please check your TLS client certification settings") } // Don't decorate context sentinel errors; users may be comparing to @@ -176,7 +178,13 @@ func (cli *Client) doRequest(ctx context.Context, req *http.Request) (serverResp // this is localised - for example in French the error would be // `open //./pipe/docker_engine: Le fichier spécifié est introuvable.` if strings.Contains(err.Error(), `open //./pipe/docker_engine`) { - err = errors.New(err.Error() + " In the default daemon configuration on Windows, the docker client must be run elevated to connect. 
This error may also indicate that the docker daemon is not running.") + // Checks if client is running with elevated privileges + if f, elevatedErr := os.Open("\\\\.\\PHYSICALDRIVE0"); elevatedErr != nil { + err = errors.Wrap(err, "In the default daemon configuration on Windows, the docker client must be run with elevated privileges to connect.") + } else { + f.Close() + err = errors.Wrap(err, "This error may indicate that the docker daemon is not running.") + } } return serverResp, errors.Wrap(err, "error during connect") @@ -195,9 +203,21 @@ func (cli *Client) checkResponseErr(serverResp serverResponse) error { return nil } - body, err := ioutil.ReadAll(serverResp.body) - if err != nil { - return err + var body []byte + var err error + if serverResp.body != nil { + bodyMax := 1 * 1024 * 1024 // 1 MiB + bodyR := &io.LimitedReader{ + R: serverResp.body, + N: int64(bodyMax), + } + body, err = ioutil.ReadAll(bodyR) + if err != nil { + return err + } + if bodyR.N == 0 { + return fmt.Errorf("request returned %s with a message (> %d bytes) for API route and version %s, check if the server supports the requested API version", http.StatusText(serverResp.statusCode), bodyMax, serverResp.reqURL) + } } if len(body) == 0 { return fmt.Errorf("request returned %s for API route and version %s, check if the server supports the requested API version", http.StatusText(serverResp.statusCode), serverResp.reqURL) @@ -212,14 +232,14 @@ func (cli *Client) checkResponseErr(serverResp serverResponse) error { if (cli.version == "" || versions.GreaterThan(cli.version, "1.23")) && ct == "application/json" { var errorResponse types.ErrorResponse if err := json.Unmarshal(body, &errorResponse); err != nil { - return fmt.Errorf("Error reading JSON: %v", err) + return errors.Wrap(err, "Error reading JSON") } - errorMessage = errorResponse.Message + errorMessage = strings.TrimSpace(errorResponse.Message) } else { - errorMessage = strings.TrimSpace(string(body)) } - return fmt.Errorf("Error response from daemon: %s", strings.TrimSpace(errorMessage)) + return errors.Wrap(errors.New(errorMessage), "Error response from daemon") } func (cli *Client) addHeaders(req *http.Request, headers headers) *http.Request { diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/secret_create.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/secret_create.go index 09fae82f2..fd5b91413 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/secret_create.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/secret_create.go @@ -15,11 +15,11 @@ func (cli *Client) SecretCreate(ctx context.Context, secret swarm.SecretSpec) (t return response, err } resp, err := cli.post(ctx, "/secrets/create", nil, secret, nil) + defer ensureReaderClosed(resp) if err != nil { return response, err } err = json.NewDecoder(resp.body).Decode(&response) - ensureReaderClosed(resp) return response, err } diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/secret_inspect.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/secret_inspect.go index e8322f458..d093916c9 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/secret_inspect.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/secret_inspect.go @@ -18,10 +18,10 @@ func (cli *Client) SecretInspectWithRaw(ctx context.Context, id string) (swarm.S return swarm.Secret{}, nil, objectNotFoundError{object: "secret", id: id} } resp, err := cli.get(ctx, "/secrets/"+id, nil, nil) + defer
ensureReaderClosed(resp) if err != nil { return swarm.Secret{}, nil, wrapResponseError(err, resp, "secret", id) } - defer ensureReaderClosed(resp) body, err := ioutil.ReadAll(resp.body) if err != nil { diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/secret_list.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/secret_list.go index f6bf7ba47..a0289c9f4 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/secret_list.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/secret_list.go @@ -27,12 +27,12 @@ func (cli *Client) SecretList(ctx context.Context, options types.SecretListOptio } resp, err := cli.get(ctx, "/secrets", query, nil) + defer ensureReaderClosed(resp) if err != nil { return nil, err } var secrets []swarm.Secret err = json.NewDecoder(resp.body).Decode(&secrets) - ensureReaderClosed(resp) return secrets, err } diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/secret_remove.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/secret_remove.go index e9d521829..c16f55580 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/secret_remove.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/secret_remove.go @@ -8,6 +8,6 @@ func (cli *Client) SecretRemove(ctx context.Context, id string) error { return err } resp, err := cli.delete(ctx, "/secrets/"+id, nil, nil) - ensureReaderClosed(resp) + defer ensureReaderClosed(resp) return wrapResponseError(err, resp, "secret", id) } diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/service_create.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/service_create.go index 8fadda4a9..620fc6cff 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/service_create.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/service_create.go @@ -72,6 +72,7 @@ func (cli *Client) ServiceCreate(ctx context.Context, service swarm.ServiceSpec, var response types.ServiceCreateResponse resp, err := cli.post(ctx, "/services/create", nil, service, headers) + defer ensureReaderClosed(resp) if err != nil { return response, err } @@ -82,7 +83,6 @@ func (cli *Client) ServiceCreate(ctx context.Context, service swarm.ServiceSpec, response.Warnings = append(response.Warnings, digestWarning(service.TaskTemplate.ContainerSpec.Image)) } - ensureReaderClosed(resp) return response, err } diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/service_inspect.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/service_inspect.go index de6aa22de..2801483b8 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/service_inspect.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/service_inspect.go @@ -20,10 +20,10 @@ func (cli *Client) ServiceInspectWithRaw(ctx context.Context, serviceID string, query := url.Values{} query.Set("insertDefaults", fmt.Sprintf("%v", opts.InsertDefaults)) serverResp, err := cli.get(ctx, "/services/"+serviceID, query, nil) + defer ensureReaderClosed(serverResp) if err != nil { return swarm.Service{}, nil, wrapResponseError(err, serverResp, "service", serviceID) } - defer ensureReaderClosed(serverResp) body, err := ioutil.ReadAll(serverResp.body) if err != nil { diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/service_list.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/service_list.go index 7d53e2b9b..64d35e715 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/service_list.go +++ 
b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/service_list.go @@ -24,12 +24,12 @@ func (cli *Client) ServiceList(ctx context.Context, options types.ServiceListOpt } resp, err := cli.get(ctx, "/services", query, nil) + defer ensureReaderClosed(resp) if err != nil { return nil, err } var services []swarm.Service err = json.NewDecoder(resp.body).Decode(&services) - ensureReaderClosed(resp) return services, err } diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/service_remove.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/service_remove.go index fe3421bec..953a2adf5 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/service_remove.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/service_remove.go @@ -5,6 +5,6 @@ import "context" // ServiceRemove kills and removes a service. func (cli *Client) ServiceRemove(ctx context.Context, serviceID string) error { resp, err := cli.delete(ctx, "/services/"+serviceID, nil, nil) - ensureReaderClosed(resp) + defer ensureReaderClosed(resp) return wrapResponseError(err, resp, "service", serviceID) } diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/service_update.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/service_update.go index 5a7a61b01..cd0f59e21 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/service_update.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/service_update.go @@ -10,7 +10,9 @@ import ( "github.com/docker/docker/api/types/swarm" ) -// ServiceUpdate updates a Service. +// ServiceUpdate updates a Service. The version number is required to avoid conflicting writes. +// It should be the value as set *before* the update. You can find this value in the Meta field +// of swarm.Service, which can be found using ServiceInspectWithRaw. 
func (cli *Client) ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (types.ServiceUpdateResponse, error) { var ( query = url.Values{} @@ -77,6 +79,7 @@ func (cli *Client) ServiceUpdate(ctx context.Context, serviceID string, version var response types.ServiceUpdateResponse resp, err := cli.post(ctx, "/services/"+serviceID+"/update", query, service, headers) + defer ensureReaderClosed(resp) if err != nil { return response, err } @@ -87,6 +90,5 @@ func (cli *Client) ServiceUpdate(ctx context.Context, serviceID string, version response.Warnings = append(response.Warnings, digestWarning(service.TaskTemplate.ContainerSpec.Image)) } - ensureReaderClosed(resp) return response, err } diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/session.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/session.go deleted file mode 100644 index c247123b4..000000000 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/session.go +++ /dev/null @@ -1,18 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net" - "net/http" -) - -// DialSession returns a connection that can be used communication with daemon -func (cli *Client) DialSession(ctx context.Context, proto string, meta map[string][]string) (net.Conn, error) { - req, err := http.NewRequest("POST", "/session", nil) - if err != nil { - return nil, err - } - req = cli.addHeaders(req, meta) - - return cli.setupHijackConn(req, proto) -} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go index 0c50c01a8..19f59dd58 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go @@ -10,12 +10,12 @@ import ( // SwarmGetUnlockKey retrieves the swarm's unlock key. func (cli *Client) SwarmGetUnlockKey(ctx context.Context) (types.SwarmUnlockKeyResponse, error) { serverResp, err := cli.get(ctx, "/swarm/unlockkey", nil, nil) + defer ensureReaderClosed(serverResp) if err != nil { return types.SwarmUnlockKeyResponse{}, err } var response types.SwarmUnlockKeyResponse err = json.NewDecoder(serverResp.body).Decode(&response) - ensureReaderClosed(serverResp) return response, err } diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/swarm_init.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/swarm_init.go index 742ca0f04..da3c1637e 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/swarm_init.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/swarm_init.go @@ -10,12 +10,12 @@ import ( // SwarmInit initializes the swarm. 
func (cli *Client) SwarmInit(ctx context.Context, req swarm.InitRequest) (string, error) { serverResp, err := cli.post(ctx, "/swarm/init", nil, req, nil) + defer ensureReaderClosed(serverResp) if err != nil { return "", err } var response string err = json.NewDecoder(serverResp.body).Decode(&response) - ensureReaderClosed(serverResp) return response, err } diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/swarm_inspect.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/swarm_inspect.go index cfaabb25b..b52b67a88 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/swarm_inspect.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/swarm_inspect.go @@ -10,12 +10,12 @@ import ( // SwarmInspect inspects the swarm. func (cli *Client) SwarmInspect(ctx context.Context) (swarm.Swarm, error) { serverResp, err := cli.get(ctx, "/swarm", nil, nil) + defer ensureReaderClosed(serverResp) if err != nil { return swarm.Swarm{}, err } var response swarm.Swarm err = json.NewDecoder(serverResp.body).Decode(&response) - ensureReaderClosed(serverResp) return response, err } diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/task_inspect.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/task_inspect.go index e1c0a736d..44d40ba5a 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/task_inspect.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/task_inspect.go @@ -15,10 +15,10 @@ func (cli *Client) TaskInspectWithRaw(ctx context.Context, taskID string) (swarm return swarm.Task{}, nil, objectNotFoundError{object: "task", id: taskID} } serverResp, err := cli.get(ctx, "/tasks/"+taskID, nil, nil) + defer ensureReaderClosed(serverResp) if err != nil { return swarm.Task{}, nil, wrapResponseError(err, serverResp, "task", taskID) } - defer ensureReaderClosed(serverResp) body, err := ioutil.ReadAll(serverResp.body) if err != nil { diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/task_list.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/task_list.go index 42d20c1b8..4869b4449 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/task_list.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/task_list.go @@ -24,12 +24,12 @@ func (cli *Client) TaskList(ctx context.Context, options types.TaskListOptions) } resp, err := cli.get(ctx, "/tasks", query, nil) + defer ensureReaderClosed(resp) if err != nil { return nil, err } var tasks []swarm.Task err = json.NewDecoder(resp.body).Decode(&tasks) - ensureReaderClosed(resp) return tasks, err } diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/version.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/version.go index 1989f6d6d..8f17ff4e8 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/version.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/version.go @@ -10,12 +10,12 @@ import ( // ServerVersion returns information of the docker client and server host. 
func (cli *Client) ServerVersion(ctx context.Context) (types.Version, error) { resp, err := cli.get(ctx, "/version", nil, nil) + defer ensureReaderClosed(resp) if err != nil { return types.Version{}, err } var server types.Version err = json.NewDecoder(resp.body).Decode(&server) - ensureReaderClosed(resp) return server, err } diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/volume_create.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/volume_create.go index f1f6fcdc4..92761b3c6 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/volume_create.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/volume_create.go @@ -12,10 +12,10 @@ import ( func (cli *Client) VolumeCreate(ctx context.Context, options volumetypes.VolumeCreateBody) (types.Volume, error) { var volume types.Volume resp, err := cli.post(ctx, "/volumes/create", nil, options, nil) + defer ensureReaderClosed(resp) if err != nil { return volume, err } err = json.NewDecoder(resp.body).Decode(&volume) - ensureReaderClosed(resp) return volume, err } diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/volume_inspect.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/volume_inspect.go index f840682d2..e20b2c67c 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/volume_inspect.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/volume_inspect.go @@ -23,10 +23,10 @@ func (cli *Client) VolumeInspectWithRaw(ctx context.Context, volumeID string) (t var volume types.Volume resp, err := cli.get(ctx, "/volumes/"+volumeID, nil, nil) + defer ensureReaderClosed(resp) if err != nil { return volume, nil, wrapResponseError(err, resp, "volume", volumeID) } - defer ensureReaderClosed(resp) body, err := ioutil.ReadAll(resp.body) if err != nil { diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/volume_list.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/volume_list.go index 284554d67..2380d5638 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/volume_list.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/volume_list.go @@ -22,11 +22,11 @@ func (cli *Client) VolumeList(ctx context.Context, filter filters.Args) (volumet query.Set("filters", filterJSON) } resp, err := cli.get(ctx, "/volumes", query, nil) + defer ensureReaderClosed(resp) if err != nil { return volumes, err } err = json.NewDecoder(resp.body).Decode(&volumes) - ensureReaderClosed(resp) return volumes, err } diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/volume_prune.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/volume_prune.go index 70041efed..6e324708f 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/volume_prune.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/volume_prune.go @@ -23,10 +23,10 @@ func (cli *Client) VolumesPrune(ctx context.Context, pruneFilters filters.Args) } serverResp, err := cli.post(ctx, "/volumes/prune", query, nil, nil) + defer ensureReaderClosed(serverResp) if err != nil { return report, err } - defer ensureReaderClosed(serverResp) if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { return report, fmt.Errorf("Error retrieving volume prune report: %v", err) diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/volume_remove.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/volume_remove.go index fc5a71d33..79decdafa 100644 --- 
a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/volume_remove.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/volume_remove.go @@ -16,6 +16,6 @@ func (cli *Client) VolumeRemove(ctx context.Context, volumeID string, force bool } } resp, err := cli.delete(ctx, "/volumes/"+volumeID, query, nil) - ensureReaderClosed(resp) + defer ensureReaderClosed(resp) return wrapResponseError(err, resp, "volume", volumeID) } diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/daemon/graphdriver/counter.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/daemon/graphdriver/counter.go new file mode 100644 index 000000000..2772bd247 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/daemon/graphdriver/counter.go @@ -0,0 +1,62 @@ +package graphdriver // import "github.com/docker/docker/daemon/graphdriver" + +import "sync" + +type minfo struct { + check bool + count int +} + +// RefCounter is a generic counter for use by graphdriver Get/Put calls +type RefCounter struct { + counts map[string]*minfo + mu sync.Mutex + checker Checker +} + +// NewRefCounter returns a new RefCounter +func NewRefCounter(c Checker) *RefCounter { + return &RefCounter{ + checker: c, + counts: make(map[string]*minfo), + } +} + +// Increment increases the ref count for the given id and returns the current count +func (c *RefCounter) Increment(path string) int { + return c.incdec(path, func(minfo *minfo) { + minfo.count++ + }) +} + +// Decrement decreases the ref count for the given id and returns the current count +func (c *RefCounter) Decrement(path string) int { + return c.incdec(path, func(minfo *minfo) { + minfo.count-- + }) +} + +func (c *RefCounter) incdec(path string, infoOp func(minfo *minfo)) int { + c.mu.Lock() + m := c.counts[path] + if m == nil { + m = &minfo{} + c.counts[path] = m + } + // if we are checking this path for the first time check to make sure + // if it was already mounted on the system and make sure we have a correct ref + // count if it is mounted as it is in use. + if !m.check { + m.check = true + if c.checker.IsMounted(path) { + m.count++ + } + } + infoOp(m) + count := m.count + if count <= 0 { + delete(c.counts, path) + } + c.mu.Unlock() + return count +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/daemon/graphdriver/driver.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/daemon/graphdriver/driver.go new file mode 100644 index 000000000..44434f7dc --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/daemon/graphdriver/driver.go @@ -0,0 +1,333 @@ +package graphdriver // import "github.com/docker/docker/daemon/graphdriver" + +import ( + "fmt" + "io" + "os" + "path/filepath" + "strings" + + "github.com/sirupsen/logrus" + "github.com/vbatts/tar-split/tar/storage" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/containerfs" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/plugingetter" +) + +// FsMagic unsigned id of the filesystem in use. +type FsMagic uint32 + +const ( + // FsMagicUnsupported is a predefined constant value other than a valid filesystem id. + FsMagicUnsupported = FsMagic(0x00000000) +) + +var ( + // All registered drivers + drivers map[string]InitFunc +) + +//CreateOpts contains optional arguments for Create() and CreateReadWrite() +// methods. +type CreateOpts struct { + MountLabel string + StorageOpt map[string]string +} + +// InitFunc initializes the storage driver. 
+type InitFunc func(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (Driver, error) + +// ProtoDriver defines the basic capabilities of a driver. +// This interface exists solely to be a minimum set of methods +// for client code which choose not to implement the entire Driver +// interface and use the NaiveDiffDriver wrapper constructor. +// +// Use of ProtoDriver directly by client code is not recommended. +type ProtoDriver interface { + // String returns a string representation of this driver. + String() string + // CreateReadWrite creates a new, empty filesystem layer that is ready + // to be used as the storage for a container. Additional options can + // be passed in opts. parent may be "" and opts may be nil. + CreateReadWrite(id, parent string, opts *CreateOpts) error + // Create creates a new, empty, filesystem layer with the + // specified id and parent and options passed in opts. Parent + // may be "" and opts may be nil. + Create(id, parent string, opts *CreateOpts) error + // Remove attempts to remove the filesystem layer with this id. + Remove(id string) error + // Get returns the mountpoint for the layered filesystem referred + // to by this id. You can optionally specify a mountLabel or "". + // Returns the absolute path to the mounted layered filesystem. + Get(id, mountLabel string) (fs containerfs.ContainerFS, err error) + // Put releases the system resources for the specified id, + // e.g, unmounting layered filesystem. + Put(id string) error + // Exists returns whether a filesystem layer with the specified + // ID exists on this driver. + Exists(id string) bool + // Status returns a set of key-value pairs which give low + // level diagnostic status about this driver. + Status() [][2]string + // Returns a set of key-value pairs which give low level information + // about the image/container driver is managing. + GetMetadata(id string) (map[string]string, error) + // Cleanup performs necessary tasks to release resources + // held by the driver, e.g., unmounting all layered filesystems + // known to this driver. + Cleanup() error +} + +// DiffDriver is the interface to use to implement graph diffs +type DiffDriver interface { + // Diff produces an archive of the changes between the specified + // layer and its parent layer which may be "". + Diff(id, parent string) (io.ReadCloser, error) + // Changes produces a list of changes between the specified layer + // and its parent layer. If parent is "", then all changes will be ADD changes. + Changes(id, parent string) ([]archive.Change, error) + // ApplyDiff extracts the changeset from the given diff into the + // layer with the specified id and parent, returning the size of the + // new layer in bytes. + // The archive.Reader must be an uncompressed stream. + ApplyDiff(id, parent string, diff io.Reader) (size int64, err error) + // DiffSize calculates the changes between the specified id + // and its parent and returns the size in bytes of the changes + // relative to its base filesystem directory. + DiffSize(id, parent string) (size int64, err error) +} + +// Driver is the interface for layered/snapshot file system drivers. +type Driver interface { + ProtoDriver + DiffDriver +} + +// Capabilities defines a list of capabilities a driver may implement. +// These capabilities are not required; however, they do determine how a +// graphdriver can be used. +type Capabilities struct { + // Flags that this driver is capable of reproducing exactly equivalent + // diffs for read-only layers. 
If set, clients can rely on the driver + // for consistent tar streams, and avoid extra processing to account + // for potential differences (eg: the layer store's use of tar-split). + ReproducesExactDiffs bool +} + +// CapabilityDriver is the interface for layered file system drivers that +// can report on their Capabilities. +type CapabilityDriver interface { + Capabilities() Capabilities +} + +// DiffGetterDriver is the interface for layered file system drivers that +// provide a specialized function for getting file contents for tar-split. +type DiffGetterDriver interface { + Driver + // DiffGetter returns an interface to efficiently retrieve the contents + // of files in a layer. + DiffGetter(id string) (FileGetCloser, error) +} + +// FileGetCloser extends the storage.FileGetter interface with a Close method +// for cleaning up. +type FileGetCloser interface { + storage.FileGetter + // Close cleans up any resources associated with the FileGetCloser. + Close() error +} + +// Checker makes checks on specified filesystems. +type Checker interface { + // IsMounted returns true if the provided path is mounted for the specific checker + IsMounted(path string) bool +} + +func init() { + drivers = make(map[string]InitFunc) +} + +// Register registers an InitFunc for the driver. +func Register(name string, initFunc InitFunc) error { + if _, exists := drivers[name]; exists { + return fmt.Errorf("Name already registered %s", name) + } + drivers[name] = initFunc + + return nil +} + +// GetDriver initializes and returns the registered driver +func GetDriver(name string, pg plugingetter.PluginGetter, config Options) (Driver, error) { + if initFunc, exists := drivers[name]; exists { + return initFunc(filepath.Join(config.Root, name), config.DriverOptions, config.UIDMaps, config.GIDMaps) + } + + pluginDriver, err := lookupPlugin(name, pg, config) + if err == nil { + return pluginDriver, nil + } + logrus.WithError(err).WithField("driver", name).WithField("home-dir", config.Root).Error("Failed to GetDriver graph") + return nil, ErrNotSupported +} + +// getBuiltinDriver initializes and returns the registered driver, but does not try to load from plugins +func getBuiltinDriver(name, home string, options []string, uidMaps, gidMaps []idtools.IDMap) (Driver, error) { + if initFunc, exists := drivers[name]; exists { + return initFunc(filepath.Join(home, name), options, uidMaps, gidMaps) + } + logrus.Errorf("Failed to built-in GetDriver graph %s %s", name, home) + return nil, ErrNotSupported +} + +// Options is used to initialize a graphdriver +type Options struct { + Root string + DriverOptions []string + UIDMaps []idtools.IDMap + GIDMaps []idtools.IDMap + ExperimentalEnabled bool +} + +// New creates the driver and initializes it at the specified root. +func New(name string, pg plugingetter.PluginGetter, config Options) (Driver, error) { + if name != "" { + logrus.Debugf("[graphdriver] trying provided driver: %s", name) // so the logs show specified driver + logDeprecatedWarning(name) + return GetDriver(name, pg, config) + } + + // Guess for prior driver + driversMap := scanPriorDrivers(config.Root) + list := strings.Split(priority, ",") + logrus.Debugf("[graphdriver] priority list: %v", list) + for _, name := range list { + if name == "vfs" { + // don't use vfs even if there is state present. 
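+			// (vfs always initializes successfully, so counting leftover
+			// vfs state as a prior driver would shadow the driver the
+			// user actually ran with)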
+ continue + } + if _, prior := driversMap[name]; prior { + // of the state found from prior drivers, check in order of our priority + // which we would prefer + driver, err := getBuiltinDriver(name, config.Root, config.DriverOptions, config.UIDMaps, config.GIDMaps) + if err != nil { + // unlike below, we will return error here, because there is prior + // state, and now it is no longer supported/prereq/compatible, so + // something changed and needs attention. Otherwise the daemon's + // images would just "disappear". + logrus.Errorf("[graphdriver] prior storage driver %s failed: %s", name, err) + return nil, err + } + + // abort starting when there are other prior configured drivers + // to ensure the user explicitly selects the driver to load + if len(driversMap)-1 > 0 { + var driversSlice []string + for name := range driversMap { + driversSlice = append(driversSlice, name) + } + + return nil, fmt.Errorf("%s contains several valid graphdrivers: %s; Please cleanup or explicitly choose storage driver (-s )", config.Root, strings.Join(driversSlice, ", ")) + } + + logrus.Infof("[graphdriver] using prior storage driver: %s", name) + logDeprecatedWarning(name) + return driver, nil + } + } + + // Check for priority drivers first + for _, name := range list { + driver, err := getBuiltinDriver(name, config.Root, config.DriverOptions, config.UIDMaps, config.GIDMaps) + if err != nil { + if IsDriverNotSupported(err) { + continue + } + return nil, err + } + logDeprecatedWarning(name) + return driver, nil + } + + // Check all registered drivers if no priority driver is found + for name, initFunc := range drivers { + if isDeprecated(name) { + // Deprecated storage-drivers are skipped in automatic selection, but + // can be selected through configuration. + continue + } + driver, err := initFunc(filepath.Join(config.Root, name), config.DriverOptions, config.UIDMaps, config.GIDMaps) + if err != nil { + if IsDriverNotSupported(err) { + continue + } + return nil, err + } + logDeprecatedWarning(name) + return driver, nil + } + return nil, fmt.Errorf("No supported storage backend found") +} + +// scanPriorDrivers returns an un-ordered scan of directories of prior storage drivers +func scanPriorDrivers(root string) map[string]bool { + driversMap := make(map[string]bool) + + for driver := range drivers { + p := filepath.Join(root, driver) + if _, err := os.Stat(p); err == nil && driver != "vfs" { + if !isEmptyDir(p) { + driversMap[driver] = true + } + } + } + return driversMap +} + +// IsInitialized checks if the driver's home-directory exists and is non-empty. +func IsInitialized(driverHome string) bool { + _, err := os.Stat(driverHome) + if os.IsNotExist(err) { + return false + } + if err != nil { + logrus.Warnf("graphdriver.IsInitialized: stat failed: %v", err) + } + return !isEmptyDir(driverHome) +} + +// isEmptyDir checks if a directory is empty. It is used to check if prior +// storage-driver directories exist. 
If an error occurs, it also assumes the +// directory is not empty (which preserves the behavior _before_ this check +// was added) +func isEmptyDir(name string) bool { + f, err := os.Open(name) + if err != nil { + return false + } + defer f.Close() + + if _, err = f.Readdirnames(1); err == io.EOF { + return true + } + return false +} + +// isDeprecated checks if a storage-driver is marked "deprecated" +func isDeprecated(name string) bool { + switch name { + // NOTE: when deprecating a driver, update daemon.fillDriverInfo() accordingly + case "aufs", "devicemapper", "overlay": + return true + } + return false +} + +// logDeprecatedWarning logs a warning if the given storage-driver is marked "deprecated" +func logDeprecatedWarning(name string) { + if isDeprecated(name) { + logrus.Warnf("[graphdriver] WARNING: the %s storage-driver is deprecated, and will be removed in a future release", name) + } +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/daemon/graphdriver/driver_freebsd.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/daemon/graphdriver/driver_freebsd.go new file mode 100644 index 000000000..cd83c4e21 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/daemon/graphdriver/driver_freebsd.go @@ -0,0 +1,21 @@ +package graphdriver // import "github.com/docker/docker/daemon/graphdriver" + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +var ( + // List of drivers that should be used in an order + priority = "zfs" +) + +// Mounted checks if the given path is mounted as the fs type +func Mounted(fsType FsMagic, mountPath string) (bool, error) { + var buf unix.Statfs_t + if err := syscall.Statfs(mountPath, &buf); err != nil { + return false, err + } + return FsMagic(buf.Type) == fsType, nil +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/daemon/graphdriver/driver_linux.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/daemon/graphdriver/driver_linux.go new file mode 100644 index 000000000..61c6b24a9 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/daemon/graphdriver/driver_linux.go @@ -0,0 +1,124 @@ +package graphdriver // import "github.com/docker/docker/daemon/graphdriver" + +import ( + "github.com/docker/docker/pkg/mount" + "golang.org/x/sys/unix" +) + +const ( + // FsMagicAufs filesystem id for Aufs + FsMagicAufs = FsMagic(0x61756673) + // FsMagicBtrfs filesystem id for Btrfs + FsMagicBtrfs = FsMagic(0x9123683E) + // FsMagicCramfs filesystem id for Cramfs + FsMagicCramfs = FsMagic(0x28cd3d45) + // FsMagicEcryptfs filesystem id for eCryptfs + FsMagicEcryptfs = FsMagic(0xf15f) + // FsMagicExtfs filesystem id for Extfs + FsMagicExtfs = FsMagic(0x0000EF53) + // FsMagicF2fs filesystem id for F2fs + FsMagicF2fs = FsMagic(0xF2F52010) + // FsMagicGPFS filesystem id for GPFS + FsMagicGPFS = FsMagic(0x47504653) + // FsMagicJffs2Fs filesystem if for Jffs2Fs + FsMagicJffs2Fs = FsMagic(0x000072b6) + // FsMagicJfs filesystem id for Jfs + FsMagicJfs = FsMagic(0x3153464a) + // FsMagicNfsFs filesystem id for NfsFs + FsMagicNfsFs = FsMagic(0x00006969) + // FsMagicRAMFs filesystem id for RamFs + FsMagicRAMFs = FsMagic(0x858458f6) + // FsMagicReiserFs filesystem id for ReiserFs + FsMagicReiserFs = FsMagic(0x52654973) + // FsMagicSmbFs filesystem id for SmbFs + FsMagicSmbFs = FsMagic(0x0000517B) + // FsMagicSquashFs filesystem id for SquashFs + FsMagicSquashFs = FsMagic(0x73717368) + // FsMagicTmpFs filesystem id for TmpFs + FsMagicTmpFs = FsMagic(0x01021994) + // FsMagicVxFS filesystem id for VxFs + FsMagicVxFS = 
FsMagic(0xa501fcf5) + // FsMagicXfs filesystem id for Xfs + FsMagicXfs = FsMagic(0x58465342) + // FsMagicZfs filesystem id for Zfs + FsMagicZfs = FsMagic(0x2fc12fc1) + // FsMagicOverlay filesystem id for overlay + FsMagicOverlay = FsMagic(0x794C7630) +) + +var ( + // List of drivers that should be used in an order + priority = "btrfs,zfs,overlay2,aufs,overlay,devicemapper,vfs" + + // FsNames maps filesystem id to name of the filesystem. + FsNames = map[FsMagic]string{ + FsMagicAufs: "aufs", + FsMagicBtrfs: "btrfs", + FsMagicCramfs: "cramfs", + FsMagicEcryptfs: "ecryptfs", + FsMagicExtfs: "extfs", + FsMagicF2fs: "f2fs", + FsMagicGPFS: "gpfs", + FsMagicJffs2Fs: "jffs2", + FsMagicJfs: "jfs", + FsMagicNfsFs: "nfs", + FsMagicOverlay: "overlayfs", + FsMagicRAMFs: "ramfs", + FsMagicReiserFs: "reiserfs", + FsMagicSmbFs: "smb", + FsMagicSquashFs: "squashfs", + FsMagicTmpFs: "tmpfs", + FsMagicUnsupported: "unsupported", + FsMagicVxFS: "vxfs", + FsMagicXfs: "xfs", + FsMagicZfs: "zfs", + } +) + +// GetFSMagic returns the filesystem id given the path. +func GetFSMagic(rootpath string) (FsMagic, error) { + var buf unix.Statfs_t + if err := unix.Statfs(rootpath, &buf); err != nil { + return 0, err + } + return FsMagic(buf.Type), nil +} + +// NewFsChecker returns a checker configured for the provided FsMagic +func NewFsChecker(t FsMagic) Checker { + return &fsChecker{ + t: t, + } +} + +type fsChecker struct { + t FsMagic +} + +func (c *fsChecker) IsMounted(path string) bool { + m, _ := Mounted(c.t, path) + return m +} + +// NewDefaultChecker returns a check that parses /proc/mountinfo to check +// if the specified path is mounted. +func NewDefaultChecker() Checker { + return &defaultChecker{} +} + +type defaultChecker struct { +} + +func (c *defaultChecker) IsMounted(path string) bool { + m, _ := mount.Mounted(path) + return m +} + +// Mounted checks if the given path is mounted as the fs type +func Mounted(fsType FsMagic, mountPath string) (bool, error) { + var buf unix.Statfs_t + if err := unix.Statfs(mountPath, &buf); err != nil { + return false, err + } + return FsMagic(buf.Type) == fsType, nil +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/daemon/graphdriver/driver_unsupported.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/daemon/graphdriver/driver_unsupported.go new file mode 100644 index 000000000..1f2e8f071 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/daemon/graphdriver/driver_unsupported.go @@ -0,0 +1,13 @@ +// +build !linux,!windows,!freebsd + +package graphdriver // import "github.com/docker/docker/daemon/graphdriver" + +var ( + // List of drivers that should be used in an order + priority = "unsupported" +) + +// GetFSMagic returns the filesystem id given the path. +func GetFSMagic(rootpath string) (FsMagic, error) { + return FsMagicUnsupported, nil +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/daemon/graphdriver/driver_windows.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/daemon/graphdriver/driver_windows.go new file mode 100644 index 000000000..856b575e7 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/daemon/graphdriver/driver_windows.go @@ -0,0 +1,12 @@ +package graphdriver // import "github.com/docker/docker/daemon/graphdriver" + +var ( + // List of drivers that should be used in order + priority = "windowsfilter" +) + +// GetFSMagic returns the filesystem id given the path. +func GetFSMagic(rootpath string) (FsMagic, error) { + // Note it is OK to return FsMagicUnsupported on Windows. 
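+	// (the priority list above is fixed to "windowsfilter", so driver
+	// selection on Windows never consults the filesystem magic)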
+ return FsMagicUnsupported, nil +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/daemon/graphdriver/errors.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/daemon/graphdriver/errors.go new file mode 100644 index 000000000..96d354455 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/daemon/graphdriver/errors.go @@ -0,0 +1,36 @@ +package graphdriver // import "github.com/docker/docker/daemon/graphdriver" + +const ( + // ErrNotSupported returned when driver is not supported. + ErrNotSupported NotSupportedError = "driver not supported" + // ErrPrerequisites returned when driver does not meet prerequisites. + ErrPrerequisites NotSupportedError = "prerequisites for driver not satisfied (wrong filesystem?)" + // ErrIncompatibleFS returned when file system is not supported. + ErrIncompatibleFS NotSupportedError = "backing file system is unsupported for this graph driver" +) + +// ErrUnSupported signals that the graph-driver is not supported on the current configuration +type ErrUnSupported interface { + NotSupported() +} + +// NotSupportedError signals that the graph-driver is not supported on the current configuration +type NotSupportedError string + +func (e NotSupportedError) Error() string { + return string(e) +} + +// NotSupported signals that a graph-driver is not supported. +func (e NotSupportedError) NotSupported() {} + +// IsDriverNotSupported returns true if the error initializing +// the graph driver is a non-supported error. +func IsDriverNotSupported(err error) bool { + switch err.(type) { + case ErrUnSupported: + return true + default: + return false + } +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/daemon/graphdriver/fsdiff.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/daemon/graphdriver/fsdiff.go new file mode 100644 index 000000000..f06caedb9 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/daemon/graphdriver/fsdiff.go @@ -0,0 +1,175 @@ +package graphdriver // import "github.com/docker/docker/daemon/graphdriver" + +import ( + "io" + "time" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/ioutils" + "github.com/sirupsen/logrus" +) + +var ( + // ApplyUncompressedLayer defines the unpack method used by the graph + // driver. + ApplyUncompressedLayer = chrootarchive.ApplyUncompressedLayer +) + +// NaiveDiffDriver takes a ProtoDriver and adds the +// capability of the Diffing methods on the local file system, +// which it may or may not support on its own. See the comment +// on the exported NewNaiveDiffDriver function below. +// Notably, the AUFS driver doesn't need to be wrapped like this. 
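+//
+// A minimal sketch of wrapping a diff-less backend (newExampleProtoDriver
+// is hypothetical, not part of this package):
+//
+//	proto, err := newExampleProtoDriver(root)
+//	if err != nil {
+//		return nil, err
+//	}
+//	var d Driver = NewNaiveDiffDriver(proto, uidMaps, gidMaps)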
+type NaiveDiffDriver struct { + ProtoDriver + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap +} + +// NewNaiveDiffDriver returns a fully functional driver that wraps the +// given ProtoDriver and adds the capability of the following methods which +// it may or may not support on its own: +// Diff(id, parent string) (archive.Archive, error) +// Changes(id, parent string) ([]archive.Change, error) +// ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error) +// DiffSize(id, parent string) (size int64, err error) +func NewNaiveDiffDriver(driver ProtoDriver, uidMaps, gidMaps []idtools.IDMap) Driver { + return &NaiveDiffDriver{ProtoDriver: driver, + uidMaps: uidMaps, + gidMaps: gidMaps} +} + +// Diff produces an archive of the changes between the specified +// layer and its parent layer which may be "". +func (gdw *NaiveDiffDriver) Diff(id, parent string) (arch io.ReadCloser, err error) { + startTime := time.Now() + driver := gdw.ProtoDriver + + layerRootFs, err := driver.Get(id, "") + if err != nil { + return nil, err + } + layerFs := layerRootFs.Path() + + defer func() { + if err != nil { + driver.Put(id) + } + }() + + if parent == "" { + archive, err := archive.Tar(layerFs, archive.Uncompressed) + if err != nil { + return nil, err + } + return ioutils.NewReadCloserWrapper(archive, func() error { + err := archive.Close() + driver.Put(id) + return err + }), nil + } + + parentRootFs, err := driver.Get(parent, "") + if err != nil { + return nil, err + } + defer driver.Put(parent) + + parentFs := parentRootFs.Path() + + changes, err := archive.ChangesDirs(layerFs, parentFs) + if err != nil { + return nil, err + } + + archive, err := archive.ExportChanges(layerFs, changes, gdw.uidMaps, gdw.gidMaps) + if err != nil { + return nil, err + } + + return ioutils.NewReadCloserWrapper(archive, func() error { + err := archive.Close() + driver.Put(id) + + // NaiveDiffDriver compares file metadata with parent layers. Parent layers + // are extracted from tar's with full second precision on modified time. + // We need this hack here to make sure calls within same second receive + // correct result. + time.Sleep(time.Until(startTime.Truncate(time.Second).Add(time.Second))) + return err + }), nil +} + +// Changes produces a list of changes between the specified layer +// and its parent layer. If parent is "", then all changes will be ADD changes. +func (gdw *NaiveDiffDriver) Changes(id, parent string) ([]archive.Change, error) { + driver := gdw.ProtoDriver + + layerRootFs, err := driver.Get(id, "") + if err != nil { + return nil, err + } + defer driver.Put(id) + + layerFs := layerRootFs.Path() + parentFs := "" + + if parent != "" { + parentRootFs, err := driver.Get(parent, "") + if err != nil { + return nil, err + } + defer driver.Put(parent) + parentFs = parentRootFs.Path() + } + + return archive.ChangesDirs(layerFs, parentFs) +} + +// ApplyDiff extracts the changeset from the given diff into the +// layer with the specified id and parent, returning the size of the +// new layer in bytes. +func (gdw *NaiveDiffDriver) ApplyDiff(id, parent string, diff io.Reader) (size int64, err error) { + driver := gdw.ProtoDriver + + // Mount the root filesystem so we can apply the diff/layer. 
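+	// (Get mounts the layer's filesystem where the driver requires a
+	// mount; the deferred Put below releases that reference once the
+	// diff has been applied)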
+ layerRootFs, err := driver.Get(id, "") + if err != nil { + return + } + defer driver.Put(id) + + layerFs := layerRootFs.Path() + options := &archive.TarOptions{UIDMaps: gdw.uidMaps, + GIDMaps: gdw.gidMaps} + start := time.Now().UTC() + logrus.WithField("id", id).Debug("Start untar layer") + if size, err = ApplyUncompressedLayer(layerFs, diff, options); err != nil { + return + } + logrus.WithField("id", id).Debugf("Untar time: %vs", time.Now().UTC().Sub(start).Seconds()) + + return +} + +// DiffSize calculates the changes between the specified layer +// and its parent and returns the size in bytes of the changes +// relative to its base filesystem directory. +func (gdw *NaiveDiffDriver) DiffSize(id, parent string) (size int64, err error) { + driver := gdw.ProtoDriver + + changes, err := gdw.Changes(id, parent) + if err != nil { + return + } + + layerFs, err := driver.Get(id, "") + if err != nil { + return + } + defer driver.Put(id) + + return archive.ChangesSize(layerFs.Path(), changes), nil +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/daemon/graphdriver/plugin.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/daemon/graphdriver/plugin.go new file mode 100644 index 000000000..b0983c566 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/daemon/graphdriver/plugin.go @@ -0,0 +1,55 @@ +package graphdriver // import "github.com/docker/docker/daemon/graphdriver" + +import ( + "fmt" + "path/filepath" + + "github.com/docker/docker/errdefs" + "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/docker/pkg/plugins" + "github.com/docker/docker/plugin/v2" + "github.com/pkg/errors" +) + +func lookupPlugin(name string, pg plugingetter.PluginGetter, config Options) (Driver, error) { + if !config.ExperimentalEnabled { + return nil, fmt.Errorf("graphdriver plugins are only supported with experimental mode") + } + pl, err := pg.Get(name, "GraphDriver", plugingetter.Acquire) + if err != nil { + return nil, fmt.Errorf("Error looking up graphdriver plugin %s: %v", name, err) + } + return newPluginDriver(name, pl, config) +} + +func newPluginDriver(name string, pl plugingetter.CompatPlugin, config Options) (Driver, error) { + home := config.Root + if !pl.IsV1() { + if p, ok := pl.(*v2.Plugin); ok { + if p.PluginObj.Config.PropagatedMount != "" { + home = p.PluginObj.Config.PropagatedMount + } + } + } + + var proxy *graphDriverProxy + + switch pt := pl.(type) { + case plugingetter.PluginWithV1Client: + proxy = &graphDriverProxy{name, pl, Capabilities{}, pt.Client()} + case plugingetter.PluginAddr: + if pt.Protocol() != plugins.ProtocolSchemeHTTPV1 { + return nil, errors.Errorf("plugin protocol not supported: %s", pt.Protocol()) + } + addr := pt.Addr() + client, err := plugins.NewClientWithTimeout(addr.Network()+"://"+addr.String(), nil, pt.Timeout()) + if err != nil { + return nil, errors.Wrap(err, "error creating plugin client") + } + proxy = &graphDriverProxy{name, pl, Capabilities{}, client} + default: + return nil, errdefs.System(errors.Errorf("got unknown plugin type %T", pt)) + } + + return proxy, proxy.Init(filepath.Join(home, name), config.DriverOptions, config.UIDMaps, config.GIDMaps) +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/daemon/graphdriver/proxy.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/daemon/graphdriver/proxy.go new file mode 100644 index 000000000..cb350d807 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/daemon/graphdriver/proxy.go @@ -0,0 +1,264 @@ +package graphdriver // import 
"github.com/docker/docker/daemon/graphdriver" + +import ( + "errors" + "fmt" + "io" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/containerfs" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/docker/pkg/plugins" +) + +type graphDriverProxy struct { + name string + p plugingetter.CompatPlugin + caps Capabilities + client *plugins.Client +} + +type graphDriverRequest struct { + ID string `json:",omitempty"` + Parent string `json:",omitempty"` + MountLabel string `json:",omitempty"` + StorageOpt map[string]string `json:",omitempty"` +} + +type graphDriverResponse struct { + Err string `json:",omitempty"` + Dir string `json:",omitempty"` + Exists bool `json:",omitempty"` + Status [][2]string `json:",omitempty"` + Changes []archive.Change `json:",omitempty"` + Size int64 `json:",omitempty"` + Metadata map[string]string `json:",omitempty"` + Capabilities Capabilities `json:",omitempty"` +} + +type graphDriverInitRequest struct { + Home string + Opts []string `json:"Opts"` + UIDMaps []idtools.IDMap `json:"UIDMaps"` + GIDMaps []idtools.IDMap `json:"GIDMaps"` +} + +func (d *graphDriverProxy) Init(home string, opts []string, uidMaps, gidMaps []idtools.IDMap) error { + if !d.p.IsV1() { + if cp, ok := d.p.(plugingetter.CountedPlugin); ok { + // always acquire here, it will be cleaned up on daemon shutdown + cp.Acquire() + } + } + args := &graphDriverInitRequest{ + Home: home, + Opts: opts, + UIDMaps: uidMaps, + GIDMaps: gidMaps, + } + var ret graphDriverResponse + if err := d.client.Call("GraphDriver.Init", args, &ret); err != nil { + return err + } + if ret.Err != "" { + return errors.New(ret.Err) + } + caps, err := d.fetchCaps() + if err != nil { + return err + } + d.caps = caps + return nil +} + +func (d *graphDriverProxy) fetchCaps() (Capabilities, error) { + args := &graphDriverRequest{} + var ret graphDriverResponse + if err := d.client.Call("GraphDriver.Capabilities", args, &ret); err != nil { + if !plugins.IsNotFound(err) { + return Capabilities{}, err + } + } + return ret.Capabilities, nil +} + +func (d *graphDriverProxy) String() string { + return d.name +} + +func (d *graphDriverProxy) Capabilities() Capabilities { + return d.caps +} + +func (d *graphDriverProxy) CreateReadWrite(id, parent string, opts *CreateOpts) error { + return d.create("GraphDriver.CreateReadWrite", id, parent, opts) +} + +func (d *graphDriverProxy) Create(id, parent string, opts *CreateOpts) error { + return d.create("GraphDriver.Create", id, parent, opts) +} + +func (d *graphDriverProxy) create(method, id, parent string, opts *CreateOpts) error { + args := &graphDriverRequest{ + ID: id, + Parent: parent, + } + if opts != nil { + args.MountLabel = opts.MountLabel + args.StorageOpt = opts.StorageOpt + } + var ret graphDriverResponse + if err := d.client.Call(method, args, &ret); err != nil { + return err + } + if ret.Err != "" { + return errors.New(ret.Err) + } + return nil +} + +func (d *graphDriverProxy) Remove(id string) error { + args := &graphDriverRequest{ID: id} + var ret graphDriverResponse + if err := d.client.Call("GraphDriver.Remove", args, &ret); err != nil { + return err + } + if ret.Err != "" { + return errors.New(ret.Err) + } + return nil +} + +func (d *graphDriverProxy) Get(id, mountLabel string) (containerfs.ContainerFS, error) { + args := &graphDriverRequest{ + ID: id, + MountLabel: mountLabel, + } + var ret graphDriverResponse + if err := d.client.Call("GraphDriver.Get", args, &ret); err != nil { + return nil, err 
+ } + var err error + if ret.Err != "" { + err = errors.New(ret.Err) + } + return containerfs.NewLocalContainerFS(d.p.ScopedPath(ret.Dir)), err +} + +func (d *graphDriverProxy) Put(id string) error { + args := &graphDriverRequest{ID: id} + var ret graphDriverResponse + if err := d.client.Call("GraphDriver.Put", args, &ret); err != nil { + return err + } + if ret.Err != "" { + return errors.New(ret.Err) + } + return nil +} + +func (d *graphDriverProxy) Exists(id string) bool { + args := &graphDriverRequest{ID: id} + var ret graphDriverResponse + if err := d.client.Call("GraphDriver.Exists", args, &ret); err != nil { + return false + } + return ret.Exists +} + +func (d *graphDriverProxy) Status() [][2]string { + args := &graphDriverRequest{} + var ret graphDriverResponse + if err := d.client.Call("GraphDriver.Status", args, &ret); err != nil { + return nil + } + return ret.Status +} + +func (d *graphDriverProxy) GetMetadata(id string) (map[string]string, error) { + args := &graphDriverRequest{ + ID: id, + } + var ret graphDriverResponse + if err := d.client.Call("GraphDriver.GetMetadata", args, &ret); err != nil { + return nil, err + } + if ret.Err != "" { + return nil, errors.New(ret.Err) + } + return ret.Metadata, nil +} + +func (d *graphDriverProxy) Cleanup() error { + if !d.p.IsV1() { + if cp, ok := d.p.(plugingetter.CountedPlugin); ok { + // always release + defer cp.Release() + } + } + + args := &graphDriverRequest{} + var ret graphDriverResponse + if err := d.client.Call("GraphDriver.Cleanup", args, &ret); err != nil { + return nil + } + if ret.Err != "" { + return errors.New(ret.Err) + } + return nil +} + +func (d *graphDriverProxy) Diff(id, parent string) (io.ReadCloser, error) { + args := &graphDriverRequest{ + ID: id, + Parent: parent, + } + body, err := d.client.Stream("GraphDriver.Diff", args) + if err != nil { + return nil, err + } + return body, nil +} + +func (d *graphDriverProxy) Changes(id, parent string) ([]archive.Change, error) { + args := &graphDriverRequest{ + ID: id, + Parent: parent, + } + var ret graphDriverResponse + if err := d.client.Call("GraphDriver.Changes", args, &ret); err != nil { + return nil, err + } + if ret.Err != "" { + return nil, errors.New(ret.Err) + } + + return ret.Changes, nil +} + +func (d *graphDriverProxy) ApplyDiff(id, parent string, diff io.Reader) (int64, error) { + var ret graphDriverResponse + if err := d.client.SendFile(fmt.Sprintf("GraphDriver.ApplyDiff?id=%s&parent=%s", id, parent), diff, &ret); err != nil { + return -1, err + } + if ret.Err != "" { + return -1, errors.New(ret.Err) + } + return ret.Size, nil +} + +func (d *graphDriverProxy) DiffSize(id, parent string) (int64, error) { + args := &graphDriverRequest{ + ID: id, + Parent: parent, + } + var ret graphDriverResponse + if err := d.client.Call("GraphDriver.DiffSize", args, &ret); err != nil { + return -1, err + } + if ret.Err != "" { + return -1, errors.New(ret.Err) + } + return ret.Size, nil +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/distribution/config.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/distribution/config.go new file mode 100644 index 000000000..438051c29 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/distribution/config.go @@ -0,0 +1,266 @@ +package distribution // import "github.com/docker/docker/distribution" + +import ( + "context" + "encoding/json" + "fmt" + "io" + "runtime" + + "github.com/docker/distribution" + "github.com/docker/distribution/manifest/schema2" + "github.com/docker/docker/api/types" + 
"github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/distribution/xfer" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/system" + refstore "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "github.com/docker/libtrust" + "github.com/opencontainers/go-digest" + specs "github.com/opencontainers/image-spec/specs-go/v1" +) + +// Config stores configuration for communicating +// with a registry. +type Config struct { + // MetaHeaders stores HTTP headers with metadata about the image + MetaHeaders map[string][]string + // AuthConfig holds authentication credentials for authenticating with + // the registry. + AuthConfig *types.AuthConfig + // ProgressOutput is the interface for showing the status of the pull + // operation. + ProgressOutput progress.Output + // RegistryService is the registry service to use for TLS configuration + // and endpoint lookup. + RegistryService registry.Service + // ImageEventLogger notifies events for a given image + ImageEventLogger func(id, name, action string) + // MetadataStore is the storage backend for distribution-specific + // metadata. + MetadataStore metadata.Store + // ImageStore manages images. + ImageStore ImageConfigStore + // ReferenceStore manages tags. This value is optional, when excluded + // content will not be tagged. + ReferenceStore refstore.Store + // RequireSchema2 ensures that only schema2 manifests are used. + RequireSchema2 bool +} + +// ImagePullConfig stores pull configuration. +type ImagePullConfig struct { + Config + + // DownloadManager manages concurrent pulls. + DownloadManager RootFSDownloadManager + // Schema2Types is the valid schema2 configuration types allowed + // by the pull operation. + Schema2Types []string + // Platform is the requested platform of the image being pulled + Platform *specs.Platform +} + +// ImagePushConfig stores push configuration. +type ImagePushConfig struct { + Config + + // ConfigMediaType is the configuration media type for + // schema2 manifests. + ConfigMediaType string + // LayerStores (indexed by operating system) manages layers. + LayerStores map[string]PushLayerProvider + // TrustKey is the private key for legacy signatures. This is typically + // an ephemeral key, since these signatures are no longer verified. + TrustKey libtrust.PrivateKey + // UploadManager dispatches uploads. + UploadManager *xfer.LayerUploadManager +} + +// ImageConfigStore handles storing and getting image configurations +// by digest. Allows getting an image configurations rootfs from the +// configuration. +type ImageConfigStore interface { + Put([]byte) (digest.Digest, error) + Get(digest.Digest) ([]byte, error) + RootFSFromConfig([]byte) (*image.RootFS, error) + PlatformFromConfig([]byte) (*specs.Platform, error) +} + +// PushLayerProvider provides layers to be pushed by ChainID. +type PushLayerProvider interface { + Get(layer.ChainID) (PushLayer, error) +} + +// PushLayer is a pushable layer with metadata about the layer +// and access to the content of the layer. +type PushLayer interface { + ChainID() layer.ChainID + DiffID() layer.DiffID + Parent() PushLayer + Open() (io.ReadCloser, error) + Size() (int64, error) + MediaType() string + Release() +} + +// RootFSDownloadManager handles downloading of the rootfs +type RootFSDownloadManager interface { + // Download downloads the layers into the given initial rootfs and + // returns the final rootfs. 
+ // Given progress output to track download progress + // Returns function to release download resources + Download(ctx context.Context, initialRootFS image.RootFS, os string, layers []xfer.DownloadDescriptor, progressOutput progress.Output) (image.RootFS, func(), error) +} + +type imageConfigStore struct { + image.Store +} + +// NewImageConfigStoreFromStore returns an ImageConfigStore backed +// by an image.Store for container images. +func NewImageConfigStoreFromStore(is image.Store) ImageConfigStore { + return &imageConfigStore{ + Store: is, + } +} + +func (s *imageConfigStore) Put(c []byte) (digest.Digest, error) { + id, err := s.Store.Create(c) + return digest.Digest(id), err +} + +func (s *imageConfigStore) Get(d digest.Digest) ([]byte, error) { + img, err := s.Store.Get(image.IDFromDigest(d)) + if err != nil { + return nil, err + } + return img.RawJSON(), nil +} + +func (s *imageConfigStore) RootFSFromConfig(c []byte) (*image.RootFS, error) { + var unmarshalledConfig image.Image + if err := json.Unmarshal(c, &unmarshalledConfig); err != nil { + return nil, err + } + return unmarshalledConfig.RootFS, nil +} + +func (s *imageConfigStore) PlatformFromConfig(c []byte) (*specs.Platform, error) { + var unmarshalledConfig image.Image + if err := json.Unmarshal(c, &unmarshalledConfig); err != nil { + return nil, err + } + + // fail immediately on Windows when downloading a non-Windows image + // and vice versa. Exception on Windows if Linux Containers are enabled. + if runtime.GOOS == "windows" && unmarshalledConfig.OS == "linux" && !system.LCOWSupported() { + return nil, fmt.Errorf("image operating system %q cannot be used on this platform", unmarshalledConfig.OS) + } else if runtime.GOOS != "windows" && unmarshalledConfig.OS == "windows" { + return nil, fmt.Errorf("image operating system %q cannot be used on this platform", unmarshalledConfig.OS) + } + + os := unmarshalledConfig.OS + if os == "" { + os = runtime.GOOS + } + if !system.IsOSSupported(os) { + return nil, system.ErrNotSupportedOperatingSystem + } + return &specs.Platform{OS: os, Architecture: unmarshalledConfig.Architecture, OSVersion: unmarshalledConfig.OSVersion}, nil +} + +type storeLayerProvider struct { + ls layer.Store +} + +// NewLayerProvidersFromStores returns layer providers backed by +// an instance of LayerStore. Only getting layers as gzipped +// tars is supported. 
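+//
+// Illustrative wiring into a push configuration (the layerStores map is
+// assumed to be built by the daemon, keyed by operating system):
+//
+//	imagePushConfig.LayerStores = NewLayerProvidersFromStores(layerStores)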
+func NewLayerProvidersFromStores(lss map[string]layer.Store) map[string]PushLayerProvider { + plps := make(map[string]PushLayerProvider) + for os, ls := range lss { + plps[os] = &storeLayerProvider{ls: ls} + } + return plps +} + +func (p *storeLayerProvider) Get(lid layer.ChainID) (PushLayer, error) { + if lid == "" { + return &storeLayer{ + Layer: layer.EmptyLayer, + }, nil + } + l, err := p.ls.Get(lid) + if err != nil { + return nil, err + } + + sl := storeLayer{ + Layer: l, + ls: p.ls, + } + if d, ok := l.(distribution.Describable); ok { + return &describableStoreLayer{ + storeLayer: sl, + describable: d, + }, nil + } + + return &sl, nil +} + +type storeLayer struct { + layer.Layer + ls layer.Store +} + +func (l *storeLayer) Parent() PushLayer { + p := l.Layer.Parent() + if p == nil { + return nil + } + sl := storeLayer{ + Layer: p, + ls: l.ls, + } + if d, ok := p.(distribution.Describable); ok { + return &describableStoreLayer{ + storeLayer: sl, + describable: d, + } + } + + return &sl +} + +func (l *storeLayer) Open() (io.ReadCloser, error) { + return l.Layer.TarStream() +} + +func (l *storeLayer) Size() (int64, error) { + return l.Layer.DiffSize() +} + +func (l *storeLayer) MediaType() string { + // layer store always returns uncompressed tars + return schema2.MediaTypeUncompressedLayer +} + +func (l *storeLayer) Release() { + if l.ls != nil { + layer.ReleaseAndLog(l.ls, l.Layer) + } +} + +type describableStoreLayer struct { + storeLayer + describable distribution.Describable +} + +func (l *describableStoreLayer) Descriptor() distribution.Descriptor { + return l.describable.Descriptor() +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/distribution/errors.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/distribution/errors.go new file mode 100644 index 000000000..9dc5955f2 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/distribution/errors.go @@ -0,0 +1,207 @@ +package distribution // import "github.com/docker/docker/distribution" + +import ( + "fmt" + "net/url" + "strings" + "syscall" + + "github.com/docker/distribution" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/distribution/registry/api/v2" + "github.com/docker/distribution/registry/client" + "github.com/docker/distribution/registry/client/auth" + "github.com/docker/docker/distribution/xfer" + "github.com/docker/docker/errdefs" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// ErrNoSupport is an error type used for errors indicating that an operation +// is not supported. It encapsulates a more specific error. +type ErrNoSupport struct{ Err error } + +func (e ErrNoSupport) Error() string { + if e.Err == nil { + return "not supported" + } + return e.Err.Error() +} + +// fallbackError wraps an error that can possibly allow fallback to a different +// endpoint. +type fallbackError struct { + // err is the error being wrapped. + err error + // confirmedV2 is set to true if it was confirmed that the registry + // supports the v2 protocol. This is used to limit fallbacks to the v1 + // protocol. + confirmedV2 bool + // transportOK is set to true if we managed to speak HTTP with the + // registry. This confirms that we're using appropriate TLS settings + // (or lack of TLS). + transportOK bool +} + +// Error renders the FallbackError as a string. 
+func (f fallbackError) Error() string { + return f.Cause().Error() +} + +func (f fallbackError) Cause() error { + return f.err +} + +// shouldV2Fallback returns true if this error is a reason to fall back to v1. +func shouldV2Fallback(err errcode.Error) bool { + switch err.Code { + case errcode.ErrorCodeUnauthorized, v2.ErrorCodeManifestUnknown, v2.ErrorCodeNameUnknown: + return true + } + return false +} + +type notFoundError struct { + cause errcode.Error + ref reference.Named +} + +func (e notFoundError) Error() string { + switch e.cause.Code { + case errcode.ErrorCodeDenied: + // ErrorCodeDenied is used when access to the repository was denied + return errors.Wrapf(e.cause, "pull access denied for %s, repository does not exist or may require 'docker login'", reference.FamiliarName(e.ref)).Error() + case v2.ErrorCodeManifestUnknown: + return errors.Wrapf(e.cause, "manifest for %s not found", reference.FamiliarString(e.ref)).Error() + case v2.ErrorCodeNameUnknown: + return errors.Wrapf(e.cause, "repository %s not found", reference.FamiliarName(e.ref)).Error() + } + // Shouldn't get here, but this is better than returning an empty string + return e.cause.Message +} + +func (e notFoundError) NotFound() {} + +func (e notFoundError) Cause() error { + return e.cause +} + +// TranslatePullError is used to convert an error from a registry pull +// operation to an error representing the entire pull operation. Any error +// information which is not used by the returned error gets output to +// log at info level. +func TranslatePullError(err error, ref reference.Named) error { + switch v := err.(type) { + case errcode.Errors: + if len(v) != 0 { + for _, extra := range v[1:] { + logrus.Infof("Ignoring extra error returned from registry: %v", extra) + } + return TranslatePullError(v[0], ref) + } + case errcode.Error: + switch v.Code { + case errcode.ErrorCodeDenied, v2.ErrorCodeManifestUnknown, v2.ErrorCodeNameUnknown: + return notFoundError{v, ref} + } + case xfer.DoNotRetry: + return TranslatePullError(v.Err, ref) + } + + return errdefs.Unknown(err) +} + +// continueOnError returns true if we should fallback to the next endpoint +// as a result of this error. +func continueOnError(err error, mirrorEndpoint bool) bool { + switch v := err.(type) { + case errcode.Errors: + if len(v) == 0 { + return true + } + return continueOnError(v[0], mirrorEndpoint) + case ErrNoSupport: + return continueOnError(v.Err, mirrorEndpoint) + case errcode.Error: + return mirrorEndpoint || shouldV2Fallback(v) + case *client.UnexpectedHTTPResponseError: + return true + case ImageConfigPullError: + // ImageConfigPullError only happens with v2 images, v1 fallback is + // unnecessary. + // Failures from a mirror endpoint should result in fallback to the + // canonical repo. + return mirrorEndpoint + case error: + return !strings.Contains(err.Error(), strings.ToLower(syscall.ESRCH.Error())) + } + // let's be nice and fallback if the error is a completely + // unexpected one. + // If new errors have to be handled in some way, please + // add them to the switch above. + return true +} + +// retryOnError wraps the error in xfer.DoNotRetry if we should not retry the +// operation after this error. 
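+//
+// Callers typically funnel registry transport errors through it (sketch;
+// doRequest is hypothetical):
+//
+//	if err := doRequest(); err != nil {
+//		return retryOnError(err) // wrapped in xfer.DoNotRetry when fatal
+//	}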
+func retryOnError(err error) error { + switch v := err.(type) { + case errcode.Errors: + if len(v) != 0 { + return retryOnError(v[0]) + } + case errcode.Error: + switch v.Code { + case errcode.ErrorCodeUnauthorized, errcode.ErrorCodeUnsupported, errcode.ErrorCodeDenied, errcode.ErrorCodeTooManyRequests, v2.ErrorCodeNameUnknown: + return xfer.DoNotRetry{Err: err} + } + case *url.Error: + switch v.Err { + case auth.ErrNoBasicAuthCredentials, auth.ErrNoToken: + return xfer.DoNotRetry{Err: v.Err} + } + return retryOnError(v.Err) + case *client.UnexpectedHTTPResponseError: + return xfer.DoNotRetry{Err: err} + case error: + if err == distribution.ErrBlobUnknown { + return xfer.DoNotRetry{Err: err} + } + if strings.Contains(err.Error(), strings.ToLower(syscall.ENOSPC.Error())) { + return xfer.DoNotRetry{Err: err} + } + } + // let's be nice and fallback if the error is a completely + // unexpected one. + // If new errors have to be handled in some way, please + // add them to the switch above. + return err +} + +type invalidManifestClassError struct { + mediaType string + class string +} + +func (e invalidManifestClassError) Error() string { + return fmt.Sprintf("Encountered remote %q(%s) when fetching", e.mediaType, e.class) +} + +func (e invalidManifestClassError) InvalidParameter() {} + +type invalidManifestFormatError struct{} + +func (invalidManifestFormatError) Error() string { + return "unsupported manifest format" +} + +func (invalidManifestFormatError) InvalidParameter() {} + +type reservedNameError string + +func (e reservedNameError) Error() string { + return "'" + string(e) + "' is a reserved name" +} + +func (e reservedNameError) Forbidden() {} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/distribution/metadata/metadata.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/distribution/metadata/metadata.go new file mode 100644 index 000000000..4ae8223bd --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/distribution/metadata/metadata.go @@ -0,0 +1,75 @@ +package metadata // import "github.com/docker/docker/distribution/metadata" + +import ( + "io/ioutil" + "os" + "path/filepath" + "sync" + + "github.com/docker/docker/pkg/ioutils" +) + +// Store implements a K/V store for mapping distribution-related IDs +// to on-disk layer IDs and image IDs. The namespace identifies the type of +// mapping (i.e. "v1ids" or "artifacts"). MetadataStore is goroutine-safe. +type Store interface { + // Get retrieves data by namespace and key. + Get(namespace string, key string) ([]byte, error) + // Set writes data indexed by namespace and key. + Set(namespace, key string, value []byte) error + // Delete removes data indexed by namespace and key. + Delete(namespace, key string) error +} + +// FSMetadataStore uses the filesystem to associate metadata with layer and +// image IDs. +type FSMetadataStore struct { + sync.RWMutex + basePath string +} + +// NewFSMetadataStore creates a new filesystem-based metadata store. +func NewFSMetadataStore(basePath string) (*FSMetadataStore, error) { + if err := os.MkdirAll(basePath, 0700); err != nil { + return nil, err + } + return &FSMetadataStore{ + basePath: basePath, + }, nil +} + +func (store *FSMetadataStore) path(namespace, key string) string { + return filepath.Join(store.basePath, namespace, key) +} + +// Get retrieves data by namespace and key. The data is read from a file named +// after the key, stored in the namespace's directory. 
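+//
+// For instance, Get("v2metadata-by-diffid", "sha256/<hex>") reads the file
+// <basePath>/v2metadata-by-diffid/sha256/<hex>, the layout produced by the
+// v2 metadata service in this package.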
+func (store *FSMetadataStore) Get(namespace string, key string) ([]byte, error) { + store.RLock() + defer store.RUnlock() + + return ioutil.ReadFile(store.path(namespace, key)) +} + +// Set writes data indexed by namespace and key. The data is written to a file +// named after the key, stored in the namespace's directory. +func (store *FSMetadataStore) Set(namespace, key string, value []byte) error { + store.Lock() + defer store.Unlock() + + path := store.path(namespace, key) + if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { + return err + } + return ioutils.AtomicWriteFile(path, value, 0644) +} + +// Delete removes data indexed by namespace and key. The data file named after +// the key, stored in the namespace's directory is deleted. +func (store *FSMetadataStore) Delete(namespace, key string) error { + store.Lock() + defer store.Unlock() + + path := store.path(namespace, key) + return os.Remove(path) +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/distribution/metadata/v1_id_service.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/distribution/metadata/v1_id_service.go new file mode 100644 index 000000000..5575c59b0 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/distribution/metadata/v1_id_service.go @@ -0,0 +1,51 @@ +package metadata // import "github.com/docker/docker/distribution/metadata" + +import ( + "github.com/docker/docker/image/v1" + "github.com/docker/docker/layer" + "github.com/pkg/errors" +) + +// V1IDService maps v1 IDs to layers on disk. +type V1IDService struct { + store Store +} + +// NewV1IDService creates a new V1 ID mapping service. +func NewV1IDService(store Store) *V1IDService { + return &V1IDService{ + store: store, + } +} + +// namespace returns the namespace used by this service. +func (idserv *V1IDService) namespace() string { + return "v1id" +} + +// Get finds a layer by its V1 ID. +func (idserv *V1IDService) Get(v1ID, registry string) (layer.DiffID, error) { + if idserv.store == nil { + return "", errors.New("no v1IDService storage") + } + if err := v1.ValidateID(v1ID); err != nil { + return layer.DiffID(""), err + } + + idBytes, err := idserv.store.Get(idserv.namespace(), registry+","+v1ID) + if err != nil { + return layer.DiffID(""), err + } + return layer.DiffID(idBytes), nil +} + +// Set associates an image with a V1 ID. +func (idserv *V1IDService) Set(v1ID, registry string, id layer.DiffID) error { + if idserv.store == nil { + return nil + } + if err := v1.ValidateID(v1ID); err != nil { + return err + } + return idserv.store.Set(idserv.namespace(), registry+","+v1ID, []byte(id)) +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/distribution/metadata/v2_metadata_service.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/distribution/metadata/v2_metadata_service.go new file mode 100644 index 000000000..fe3349855 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/distribution/metadata/v2_metadata_service.go @@ -0,0 +1,241 @@ +package metadata // import "github.com/docker/docker/distribution/metadata" + +import ( + "crypto/hmac" + "crypto/sha256" + "encoding/hex" + "encoding/json" + "errors" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/layer" + "github.com/opencontainers/go-digest" +) + +// V2MetadataService maps layer IDs to a set of known metadata for +// the layer. 
+type V2MetadataService interface { + GetMetadata(diffID layer.DiffID) ([]V2Metadata, error) + GetDiffID(dgst digest.Digest) (layer.DiffID, error) + Add(diffID layer.DiffID, metadata V2Metadata) error + TagAndAdd(diffID layer.DiffID, hmacKey []byte, metadata V2Metadata) error + Remove(metadata V2Metadata) error +} + +// v2MetadataService implements V2MetadataService +type v2MetadataService struct { + store Store +} + +var _ V2MetadataService = &v2MetadataService{} + +// V2Metadata contains the digest and source repository information for a layer. +type V2Metadata struct { + Digest digest.Digest + SourceRepository string + // HMAC hashes above attributes with recent authconfig digest used as a key in order to determine matching + // metadata entries accompanied by the same credentials without actually exposing them. + HMAC string +} + +// CheckV2MetadataHMAC returns true if the given "meta" is tagged with a hmac hashed by the given "key". +func CheckV2MetadataHMAC(meta *V2Metadata, key []byte) bool { + if len(meta.HMAC) == 0 || len(key) == 0 { + return len(meta.HMAC) == 0 && len(key) == 0 + } + mac := hmac.New(sha256.New, key) + mac.Write([]byte(meta.Digest)) + mac.Write([]byte(meta.SourceRepository)) + expectedMac := mac.Sum(nil) + + storedMac, err := hex.DecodeString(meta.HMAC) + if err != nil { + return false + } + + return hmac.Equal(storedMac, expectedMac) +} + +// ComputeV2MetadataHMAC returns a hmac for the given "meta" hash by the given key. +func ComputeV2MetadataHMAC(key []byte, meta *V2Metadata) string { + if len(key) == 0 || meta == nil { + return "" + } + mac := hmac.New(sha256.New, key) + mac.Write([]byte(meta.Digest)) + mac.Write([]byte(meta.SourceRepository)) + return hex.EncodeToString(mac.Sum(nil)) +} + +// ComputeV2MetadataHMACKey returns a key for the given "authConfig" that can be used to hash v2 metadata +// entries. +func ComputeV2MetadataHMACKey(authConfig *types.AuthConfig) ([]byte, error) { + if authConfig == nil { + return nil, nil + } + key := authConfigKeyInput{ + Username: authConfig.Username, + Password: authConfig.Password, + Auth: authConfig.Auth, + IdentityToken: authConfig.IdentityToken, + RegistryToken: authConfig.RegistryToken, + } + buf, err := json.Marshal(&key) + if err != nil { + return nil, err + } + return []byte(digest.FromBytes(buf)), nil +} + +// authConfigKeyInput is a reduced AuthConfig structure holding just relevant credential data eligible for +// hmac key creation. +type authConfigKeyInput struct { + Username string `json:"username,omitempty"` + Password string `json:"password,omitempty"` + Auth string `json:"auth,omitempty"` + + IdentityToken string `json:"identitytoken,omitempty"` + RegistryToken string `json:"registrytoken,omitempty"` +} + +// maxMetadata is the number of metadata entries to keep per layer DiffID. +const maxMetadata = 50 + +// NewV2MetadataService creates a new diff ID to v2 metadata mapping service. 
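+//
+// Sketch of a typical round trip (metadataStore is assumed to be an
+// FSMetadataStore created by the daemon):
+//
+//	srv := NewV2MetadataService(metadataStore)
+//	if err := srv.Add(diffID, V2Metadata{Digest: dgst, SourceRepository: repo}); err != nil {
+//		return err
+//	}
+//	back, err := srv.GetDiffID(dgst) // back == diffID on success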
+func NewV2MetadataService(store Store) V2MetadataService { + return &v2MetadataService{ + store: store, + } +} + +func (serv *v2MetadataService) diffIDNamespace() string { + return "v2metadata-by-diffid" +} + +func (serv *v2MetadataService) digestNamespace() string { + return "diffid-by-digest" +} + +func (serv *v2MetadataService) diffIDKey(diffID layer.DiffID) string { + return string(digest.Digest(diffID).Algorithm()) + "/" + digest.Digest(diffID).Hex() +} + +func (serv *v2MetadataService) digestKey(dgst digest.Digest) string { + return string(dgst.Algorithm()) + "/" + dgst.Hex() +} + +// GetMetadata finds the metadata associated with a layer DiffID. +func (serv *v2MetadataService) GetMetadata(diffID layer.DiffID) ([]V2Metadata, error) { + if serv.store == nil { + return nil, errors.New("no metadata storage") + } + jsonBytes, err := serv.store.Get(serv.diffIDNamespace(), serv.diffIDKey(diffID)) + if err != nil { + return nil, err + } + + var metadata []V2Metadata + if err := json.Unmarshal(jsonBytes, &metadata); err != nil { + return nil, err + } + + return metadata, nil +} + +// GetDiffID finds a layer DiffID from a digest. +func (serv *v2MetadataService) GetDiffID(dgst digest.Digest) (layer.DiffID, error) { + if serv.store == nil { + return layer.DiffID(""), errors.New("no metadata storage") + } + diffIDBytes, err := serv.store.Get(serv.digestNamespace(), serv.digestKey(dgst)) + if err != nil { + return layer.DiffID(""), err + } + + return layer.DiffID(diffIDBytes), nil +} + +// Add associates metadata with a layer DiffID. If too many metadata entries are +// present, the oldest one is dropped. +func (serv *v2MetadataService) Add(diffID layer.DiffID, metadata V2Metadata) error { + if serv.store == nil { + // Support a service which has no backend storage, in this case + // an add becomes a no-op. + // TODO: implement in memory storage + return nil + } + oldMetadata, err := serv.GetMetadata(diffID) + if err != nil { + oldMetadata = nil + } + newMetadata := make([]V2Metadata, 0, len(oldMetadata)+1) + + // Copy all other metadata to new slice + for _, oldMeta := range oldMetadata { + if oldMeta != metadata { + newMetadata = append(newMetadata, oldMeta) + } + } + + newMetadata = append(newMetadata, metadata) + + if len(newMetadata) > maxMetadata { + newMetadata = newMetadata[len(newMetadata)-maxMetadata:] + } + + jsonBytes, err := json.Marshal(newMetadata) + if err != nil { + return err + } + + err = serv.store.Set(serv.diffIDNamespace(), serv.diffIDKey(diffID), jsonBytes) + if err != nil { + return err + } + + return serv.store.Set(serv.digestNamespace(), serv.digestKey(metadata.Digest), []byte(diffID)) +} + +// TagAndAdd amends the given "meta" for hmac hashed by the given "hmacKey" and associates it with a layer +// DiffID. If too many metadata entries are present, the oldest one is dropped. +func (serv *v2MetadataService) TagAndAdd(diffID layer.DiffID, hmacKey []byte, meta V2Metadata) error { + meta.HMAC = ComputeV2MetadataHMAC(hmacKey, &meta) + return serv.Add(diffID, meta) +} + +// Remove disassociates a metadata entry from a layer DiffID. +func (serv *v2MetadataService) Remove(metadata V2Metadata) error { + if serv.store == nil { + // Support a service which has no backend storage, in this case + // an remove becomes a no-op. 
+ // TODO: implement in memory storage + return nil + } + diffID, err := serv.GetDiffID(metadata.Digest) + if err != nil { + return err + } + oldMetadata, err := serv.GetMetadata(diffID) + if err != nil { + oldMetadata = nil + } + newMetadata := make([]V2Metadata, 0, len(oldMetadata)) + + // Copy all other metadata to new slice + for _, oldMeta := range oldMetadata { + if oldMeta != metadata { + newMetadata = append(newMetadata, oldMeta) + } + } + + if len(newMetadata) == 0 { + return serv.store.Delete(serv.diffIDNamespace(), serv.diffIDKey(diffID)) + } + + jsonBytes, err := json.Marshal(newMetadata) + if err != nil { + return err + } + + return serv.store.Set(serv.diffIDNamespace(), serv.diffIDKey(diffID), jsonBytes) +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/distribution/pull.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/distribution/pull.go new file mode 100644 index 000000000..be366ce4a --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/distribution/pull.go @@ -0,0 +1,196 @@ +package distribution // import "github.com/docker/docker/distribution" + +import ( + "context" + "fmt" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api" + "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/pkg/progress" + refstore "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "github.com/opencontainers/go-digest" + specs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// Puller is an interface that abstracts pulling for different API versions. +type Puller interface { + // Pull tries to pull the image referenced by `tag` + // Pull returns an error if any, as well as a boolean that determines whether to retry Pull on the next configured endpoint. + // + Pull(ctx context.Context, ref reference.Named, platform *specs.Platform) error +} + +// newPuller returns a Puller interface that will pull from either a v1 or v2 +// registry. The endpoint argument contains a Version field that determines +// whether a v1 or v2 puller will be created. The other parameters are passed +// through to the underlying puller implementation for use during the actual +// pull operation. +func newPuller(endpoint registry.APIEndpoint, repoInfo *registry.RepositoryInfo, imagePullConfig *ImagePullConfig) (Puller, error) { + switch endpoint.Version { + case registry.APIVersion2: + return &v2Puller{ + V2MetadataService: metadata.NewV2MetadataService(imagePullConfig.MetadataStore), + endpoint: endpoint, + config: imagePullConfig, + repoInfo: repoInfo, + }, nil + case registry.APIVersion1: + return nil, fmt.Errorf("protocol version %d no longer supported. Please contact admins of registry %s", endpoint.Version, endpoint.URL) + } + return nil, fmt.Errorf("unknown version %d for registry %s", endpoint.Version, endpoint.URL) +} + +// Pull initiates a pull operation. image is the repository name to pull, and +// tag may be either empty, or indicate a specific tag to pull. 
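+//
+// Illustrative call (sketch; pullConfig is an *ImagePullConfig assembled by
+// the daemon):
+//
+//	named, err := reference.ParseNormalizedNamed("alpine:latest")
+//	if err != nil {
+//		return err
+//	}
+//	return Pull(ctx, named, pullConfig)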
+func Pull(ctx context.Context, ref reference.Named, imagePullConfig *ImagePullConfig) error {
+	// Resolve the Repository name from fqn to RepositoryInfo
+	repoInfo, err := imagePullConfig.RegistryService.ResolveRepository(ref)
+	if err != nil {
+		return err
+	}
+
+	// make sure the name is not `scratch`
+	if err := ValidateRepoName(repoInfo.Name); err != nil {
+		return err
+	}
+
+	endpoints, err := imagePullConfig.RegistryService.LookupPullEndpoints(reference.Domain(repoInfo.Name))
+	if err != nil {
+		return err
+	}
+
+	var (
+		lastErr error
+
+		// discardNoSupportErrors is used to track whether an endpoint encountered an error of type registry.ErrNoSupport.
+		// By default it is false, which means that if an ErrNoSupport error is encountered, it will be saved in lastErr.
+		// As soon as another kind of error is encountered, discardNoSupportErrors is set to true, avoiding the saving of
+		// any subsequent ErrNoSupport errors in lastErr.
+		// It is needed for pull-by-digest on v1 endpoints: if only v1 endpoints are configured, the error should be
+		// returned and displayed, but if there was a v2 endpoint which supports pull-by-digest, then the last relevant
+		// error is the one from the v2 endpoints, not the v1 ones.
+		discardNoSupportErrors bool
+
+		// confirmedV2 is set to true if a pull attempt managed to
+		// confirm that it was talking to a v2 registry. This will
+		// prevent fallback to the v1 protocol.
+		confirmedV2 bool
+
+		// confirmedTLSRegistries is a map indicating which registries
+		// are known to be using TLS. There should never be a plaintext
+		// retry for any of these.
+		confirmedTLSRegistries = make(map[string]struct{})
+	)
+	for _, endpoint := range endpoints {
+		if imagePullConfig.RequireSchema2 && endpoint.Version == registry.APIVersion1 {
+			continue
+		}
+
+		if confirmedV2 && endpoint.Version == registry.APIVersion1 {
+			logrus.Debugf("Skipping v1 endpoint %s because v2 registry was detected", endpoint.URL)
+			continue
+		}
+
+		if endpoint.URL.Scheme != "https" {
+			if _, confirmedTLS := confirmedTLSRegistries[endpoint.URL.Host]; confirmedTLS {
+				logrus.Debugf("Skipping non-TLS endpoint %s for host/port that appears to use TLS", endpoint.URL)
+				continue
+			}
+		}
+
+		logrus.Debugf("Trying to pull %s from %s %s", reference.FamiliarName(repoInfo.Name), endpoint.URL, endpoint.Version)
+
+		puller, err := newPuller(endpoint, repoInfo, imagePullConfig)
+		if err != nil {
+			lastErr = err
+			continue
+		}
+
+		if err := puller.Pull(ctx, ref, imagePullConfig.Platform); err != nil {
+			// Was this pull cancelled? If so, don't try to fall
+			// back.
+			fallback := false
+			select {
+			case <-ctx.Done():
+			default:
+				if fallbackErr, ok := err.(fallbackError); ok {
+					fallback = true
+					confirmedV2 = confirmedV2 || fallbackErr.confirmedV2
+					if fallbackErr.transportOK && endpoint.URL.Scheme == "https" {
+						confirmedTLSRegistries[endpoint.URL.Host] = struct{}{}
+					}
+					err = fallbackErr.err
+				}
+			}
+			if fallback {
+				if _, ok := err.(ErrNoSupport); !ok {
+					// Because we found an error that's not ErrNoSupport, discard all subsequent ErrNoSupport errors.
+					discardNoSupportErrors = true
+					// append subsequent errors
+					lastErr = err
+				} else if !discardNoSupportErrors {
+					// Save the ErrNoSupport error, because it's either the first error or all encountered errors
+					// were also ErrNoSupport errors.
+ // append subsequent errors + lastErr = err + } + logrus.Infof("Attempting next endpoint for pull after error: %v", err) + continue + } + logrus.Errorf("Not continuing with pull after error: %v", err) + return TranslatePullError(err, ref) + } + + imagePullConfig.ImageEventLogger(reference.FamiliarString(ref), reference.FamiliarName(repoInfo.Name), "pull") + return nil + } + + if lastErr == nil { + lastErr = fmt.Errorf("no endpoints found for %s", reference.FamiliarString(ref)) + } + + return TranslatePullError(lastErr, ref) +} + +// writeStatus writes a status message to out. If layersDownloaded is true, the +// status message indicates that a newer image was downloaded. Otherwise, it +// indicates that the image is up to date. requestedTag is the tag the message +// will refer to. +func writeStatus(requestedTag string, out progress.Output, layersDownloaded bool) { + if layersDownloaded { + progress.Message(out, "", "Status: Downloaded newer image for "+requestedTag) + } else { + progress.Message(out, "", "Status: Image is up to date for "+requestedTag) + } +} + +// ValidateRepoName validates the name of a repository. +func ValidateRepoName(name reference.Named) error { + if reference.FamiliarName(name) == api.NoBaseImageSpecifier { + return errors.WithStack(reservedNameError(api.NoBaseImageSpecifier)) + } + return nil +} + +func addDigestReference(store refstore.Store, ref reference.Named, dgst digest.Digest, id digest.Digest) error { + dgstRef, err := reference.WithDigest(reference.TrimNamed(ref), dgst) + if err != nil { + return err + } + + if oldTagID, err := store.Get(dgstRef); err == nil { + if oldTagID != id { + // Updating digests not supported by reference store + logrus.Errorf("Image ID for digest %s changed from %s to %s, cannot update", dgst.String(), oldTagID, id) + } + return nil + } else if err != refstore.ErrDoesNotExist { + return err + } + + return store.AddDigest(dgstRef, id, true) +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/distribution/pull_v2.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/distribution/pull_v2.go new file mode 100644 index 000000000..3307458fd --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/distribution/pull_v2.go @@ -0,0 +1,1013 @@ +package distribution // import "github.com/docker/docker/distribution" + +import ( + "context" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/url" + "os" + "runtime" + "strings" + + "github.com/containerd/containerd/platforms" + "github.com/docker/distribution" + "github.com/docker/distribution/manifest/manifestlist" + "github.com/docker/distribution/manifest/ocischema" + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/manifest/schema2" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/distribution/registry/client/auth" + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/distribution/xfer" + "github.com/docker/docker/image" + v1 "github.com/docker/docker/image/v1" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/system" + refstore "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "github.com/opencontainers/go-digest" + specs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + 
"github.com/sirupsen/logrus" +) + +var ( + errRootFSMismatch = errors.New("layers from manifest don't match image configuration") + errRootFSInvalid = errors.New("invalid rootfs in image configuration") +) + +// ImageConfigPullError is an error pulling the image config blob +// (only applies to schema2). +type ImageConfigPullError struct { + Err error +} + +// Error returns the error string for ImageConfigPullError. +func (e ImageConfigPullError) Error() string { + return "error pulling image configuration: " + e.Err.Error() +} + +type v2Puller struct { + V2MetadataService metadata.V2MetadataService + endpoint registry.APIEndpoint + config *ImagePullConfig + repoInfo *registry.RepositoryInfo + repo distribution.Repository + // confirmedV2 is set to true if we confirm we're talking to a v2 + // registry. This is used to limit fallbacks to the v1 protocol. + confirmedV2 bool +} + +func (p *v2Puller) Pull(ctx context.Context, ref reference.Named, platform *specs.Platform) (err error) { + // TODO(tiborvass): was ReceiveTimeout + p.repo, p.confirmedV2, err = NewV2Repository(ctx, p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "pull") + if err != nil { + logrus.Warnf("Error getting v2 registry: %v", err) + return err + } + + if err = p.pullV2Repository(ctx, ref, platform); err != nil { + if _, ok := err.(fallbackError); ok { + return err + } + if continueOnError(err, p.endpoint.Mirror) { + return fallbackError{ + err: err, + confirmedV2: p.confirmedV2, + transportOK: true, + } + } + } + return err +} + +func (p *v2Puller) pullV2Repository(ctx context.Context, ref reference.Named, platform *specs.Platform) (err error) { + var layersDownloaded bool + if !reference.IsNameOnly(ref) { + layersDownloaded, err = p.pullV2Tag(ctx, ref, platform) + if err != nil { + return err + } + } else { + tags, err := p.repo.Tags(ctx).All(ctx) + if err != nil { + // If this repository doesn't exist on V2, we should + // permit a fallback to V1. + return allowV1Fallback(err) + } + + // The v2 registry knows about this repository, so we will not + // allow fallback to the v1 protocol even if we encounter an + // error later on. + p.confirmedV2 = true + + for _, tag := range tags { + tagRef, err := reference.WithTag(ref, tag) + if err != nil { + return err + } + pulledNew, err := p.pullV2Tag(ctx, tagRef, platform) + if err != nil { + // Since this is the pull-all-tags case, don't + // allow an error pulling a particular tag to + // make the whole pull fall back to v1. + if fallbackErr, ok := err.(fallbackError); ok { + return fallbackErr.err + } + return err + } + // pulledNew is true if either new layers were downloaded OR if existing images were newly tagged + // TODO(tiborvass): should we change the name of `layersDownload`? What about message in WriteStatus? 
+ layersDownloaded = layersDownloaded || pulledNew + } + } + + writeStatus(reference.FamiliarString(ref), p.config.ProgressOutput, layersDownloaded) + + return nil +} + +type v2LayerDescriptor struct { + digest digest.Digest + diffID layer.DiffID + repoInfo *registry.RepositoryInfo + repo distribution.Repository + V2MetadataService metadata.V2MetadataService + tmpFile *os.File + verifier digest.Verifier + src distribution.Descriptor +} + +func (ld *v2LayerDescriptor) Key() string { + return "v2:" + ld.digest.String() +} + +func (ld *v2LayerDescriptor) ID() string { + return stringid.TruncateID(ld.digest.String()) +} + +func (ld *v2LayerDescriptor) DiffID() (layer.DiffID, error) { + if ld.diffID != "" { + return ld.diffID, nil + } + return ld.V2MetadataService.GetDiffID(ld.digest) +} + +func (ld *v2LayerDescriptor) Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error) { + logrus.Debugf("pulling blob %q", ld.digest) + + var ( + err error + offset int64 + ) + + if ld.tmpFile == nil { + ld.tmpFile, err = createDownloadFile() + if err != nil { + return nil, 0, xfer.DoNotRetry{Err: err} + } + } else { + offset, err = ld.tmpFile.Seek(0, os.SEEK_END) + if err != nil { + logrus.Debugf("error seeking to end of download file: %v", err) + offset = 0 + + ld.tmpFile.Close() + if err := os.Remove(ld.tmpFile.Name()); err != nil { + logrus.Errorf("Failed to remove temp file: %s", ld.tmpFile.Name()) + } + ld.tmpFile, err = createDownloadFile() + if err != nil { + return nil, 0, xfer.DoNotRetry{Err: err} + } + } else if offset != 0 { + logrus.Debugf("attempting to resume download of %q from %d bytes", ld.digest, offset) + } + } + + tmpFile := ld.tmpFile + + layerDownload, err := ld.open(ctx) + if err != nil { + logrus.Errorf("Error initiating layer download: %v", err) + return nil, 0, retryOnError(err) + } + + if offset != 0 { + _, err := layerDownload.Seek(offset, os.SEEK_SET) + if err != nil { + if err := ld.truncateDownloadFile(); err != nil { + return nil, 0, xfer.DoNotRetry{Err: err} + } + return nil, 0, err + } + } + size, err := layerDownload.Seek(0, os.SEEK_END) + if err != nil { + // Seek failed, perhaps because there was no Content-Length + // header. This shouldn't fail the download, because we can + // still continue without a progress bar. + size = 0 + } else { + if size != 0 && offset > size { + logrus.Debug("Partial download is larger than full blob. Starting over") + offset = 0 + if err := ld.truncateDownloadFile(); err != nil { + return nil, 0, xfer.DoNotRetry{Err: err} + } + } + + // Restore the seek offset either at the beginning of the + // stream, or just after the last byte we have from previous + // attempts. 
+ _, err = layerDownload.Seek(offset, os.SEEK_SET) + if err != nil { + return nil, 0, err + } + } + + reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, layerDownload), progressOutput, size-offset, ld.ID(), "Downloading") + defer reader.Close() + + if ld.verifier == nil { + ld.verifier = ld.digest.Verifier() + } + + _, err = io.Copy(tmpFile, io.TeeReader(reader, ld.verifier)) + if err != nil { + if err == transport.ErrWrongCodeForByteRange { + if err := ld.truncateDownloadFile(); err != nil { + return nil, 0, xfer.DoNotRetry{Err: err} + } + return nil, 0, err + } + return nil, 0, retryOnError(err) + } + + progress.Update(progressOutput, ld.ID(), "Verifying Checksum") + + if !ld.verifier.Verified() { + err = fmt.Errorf("filesystem layer verification failed for digest %s", ld.digest) + logrus.Error(err) + + // Allow a retry if this digest verification error happened + // after a resumed download. + if offset != 0 { + if err := ld.truncateDownloadFile(); err != nil { + return nil, 0, xfer.DoNotRetry{Err: err} + } + + return nil, 0, err + } + return nil, 0, xfer.DoNotRetry{Err: err} + } + + progress.Update(progressOutput, ld.ID(), "Download complete") + + logrus.Debugf("Downloaded %s to tempfile %s", ld.ID(), tmpFile.Name()) + + _, err = tmpFile.Seek(0, os.SEEK_SET) + if err != nil { + tmpFile.Close() + if err := os.Remove(tmpFile.Name()); err != nil { + logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name()) + } + ld.tmpFile = nil + ld.verifier = nil + return nil, 0, xfer.DoNotRetry{Err: err} + } + + // hand off the temporary file to the download manager, so it will only + // be closed once + ld.tmpFile = nil + + return ioutils.NewReadCloserWrapper(tmpFile, func() error { + tmpFile.Close() + err := os.RemoveAll(tmpFile.Name()) + if err != nil { + logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name()) + } + return err + }), size, nil +} + +func (ld *v2LayerDescriptor) Close() { + if ld.tmpFile != nil { + ld.tmpFile.Close() + if err := os.RemoveAll(ld.tmpFile.Name()); err != nil { + logrus.Errorf("Failed to remove temp file: %s", ld.tmpFile.Name()) + } + } +} + +func (ld *v2LayerDescriptor) truncateDownloadFile() error { + // Need a new hash context since we will be redoing the download + ld.verifier = nil + + if _, err := ld.tmpFile.Seek(0, os.SEEK_SET); err != nil { + logrus.Errorf("error seeking to beginning of download file: %v", err) + return err + } + + if err := ld.tmpFile.Truncate(0); err != nil { + logrus.Errorf("error truncating download file: %v", err) + return err + } + + return nil +} + +func (ld *v2LayerDescriptor) Registered(diffID layer.DiffID) { + // Cache mapping from this layer's DiffID to the blobsum + ld.V2MetadataService.Add(diffID, metadata.V2Metadata{Digest: ld.digest, SourceRepository: ld.repoInfo.Name.Name()}) +} + +func (p *v2Puller) pullV2Tag(ctx context.Context, ref reference.Named, platform *specs.Platform) (tagUpdated bool, err error) { + manSvc, err := p.repo.Manifests(ctx) + if err != nil { + return false, err + } + + var ( + manifest distribution.Manifest + tagOrDigest string // Used for logging/progress only + ) + if digested, isDigested := ref.(reference.Canonical); isDigested { + manifest, err = manSvc.Get(ctx, digested.Digest()) + if err != nil { + return false, err + } + tagOrDigest = digested.Digest().String() + } else if tagged, isTagged := ref.(reference.NamedTagged); isTagged { + manifest, err = manSvc.Get(ctx, "", distribution.WithTag(tagged.Tag())) + if err != nil { + return false, allowV1Fallback(err) + } + 
tagOrDigest = tagged.Tag()
+	} else {
+		return false, fmt.Errorf("internal error: reference has neither a tag nor a digest: %s", reference.FamiliarString(ref))
+	}
+
+	if manifest == nil {
+		return false, fmt.Errorf("image manifest does not exist for tag or digest %q", tagOrDigest)
+	}
+
+	if m, ok := manifest.(*schema2.DeserializedManifest); ok {
+		var allowedMediatype bool
+		for _, t := range p.config.Schema2Types {
+			if m.Manifest.Config.MediaType == t {
+				allowedMediatype = true
+				break
+			}
+		}
+		if !allowedMediatype {
+			configClass := mediaTypeClasses[m.Manifest.Config.MediaType]
+			if configClass == "" {
+				configClass = "unknown"
+			}
+			return false, invalidManifestClassError{m.Manifest.Config.MediaType, configClass}
+		}
+	}
+
+	// If manSvc.Get succeeded, we can be confident that the registry on
+	// the other side speaks the v2 protocol.
+	p.confirmedV2 = true
+
+	logrus.Debugf("Pulling ref from V2 registry: %s", reference.FamiliarString(ref))
+	progress.Message(p.config.ProgressOutput, tagOrDigest, "Pulling from "+reference.FamiliarName(p.repo.Named()))
+
+	var (
+		id             digest.Digest
+		manifestDigest digest.Digest
+	)
+
+	switch v := manifest.(type) {
+	case *schema1.SignedManifest:
+		if p.config.RequireSchema2 {
+			return false, fmt.Errorf("invalid manifest: not schema2")
+		}
+
+		// give registries time to upgrade to schema2 and only warn if we know a registry has been upgraded a long time ago
+		// TODO: condition to be removed
+		if reference.Domain(ref) == "docker.io" {
+			msg := fmt.Sprintf("Image %s uses outdated schema1 manifest format. Please upgrade to a schema2 image for better future compatibility. More information at https://docs.docker.com/registry/spec/deprecated-schema-v1/", ref)
+			logrus.Warn(msg)
+			progress.Message(p.config.ProgressOutput, "", msg)
+		}
+
+		id, manifestDigest, err = p.pullSchema1(ctx, ref, v, platform)
+		if err != nil {
+			return false, err
+		}
+	case *schema2.DeserializedManifest:
+		id, manifestDigest, err = p.pullSchema2(ctx, ref, v, platform)
+		if err != nil {
+			return false, err
+		}
+	case *ocischema.DeserializedManifest:
+		id, manifestDigest, err = p.pullOCI(ctx, ref, v, platform)
+		if err != nil {
+			return false, err
+		}
+	case *manifestlist.DeserializedManifestList:
+		id, manifestDigest, err = p.pullManifestList(ctx, ref, v, platform)
+		if err != nil {
+			return false, err
+		}
+	default:
+		return false, invalidManifestFormatError{}
+	}
+
+	progress.Message(p.config.ProgressOutput, "", "Digest: "+manifestDigest.String())
+
+	if p.config.ReferenceStore != nil {
+		oldTagID, err := p.config.ReferenceStore.Get(ref)
+		if err == nil {
+			if oldTagID == id {
+				return false, addDigestReference(p.config.ReferenceStore, ref, manifestDigest, id)
+			}
+		} else if err != refstore.ErrDoesNotExist {
+			return false, err
+		}
+
+		if canonical, ok := ref.(reference.Canonical); ok {
+			if err = p.config.ReferenceStore.AddDigest(canonical, id, true); err != nil {
+				return false, err
+			}
+		} else {
+			if err = addDigestReference(p.config.ReferenceStore, ref, manifestDigest, id); err != nil {
+				return false, err
+			}
+			if err = p.config.ReferenceStore.AddTag(ref, id, true); err != nil {
+				return false, err
+			}
+		}
+	}
+	return true, nil
+}
+
+func (p *v2Puller) pullSchema1(ctx context.Context, ref reference.Reference, unverifiedManifest *schema1.SignedManifest, platform *specs.Platform) (id digest.Digest, manifestDigest digest.Digest, err error) {
+	var verifiedManifest *schema1.Manifest
+	verifiedManifest, err = verifySchema1Manifest(unverifiedManifest, ref)
+	if err != nil {
+
return "", "", err + } + + rootFS := image.NewRootFS() + + // remove duplicate layers and check parent chain validity + err = fixManifestLayers(verifiedManifest) + if err != nil { + return "", "", err + } + + var descriptors []xfer.DownloadDescriptor + + // Image history converted to the new format + var history []image.History + + // Note that the order of this loop is in the direction of bottom-most + // to top-most, so that the downloads slice gets ordered correctly. + for i := len(verifiedManifest.FSLayers) - 1; i >= 0; i-- { + blobSum := verifiedManifest.FSLayers[i].BlobSum + + var throwAway struct { + ThrowAway bool `json:"throwaway,omitempty"` + } + if err := json.Unmarshal([]byte(verifiedManifest.History[i].V1Compatibility), &throwAway); err != nil { + return "", "", err + } + + h, err := v1.HistoryFromConfig([]byte(verifiedManifest.History[i].V1Compatibility), throwAway.ThrowAway) + if err != nil { + return "", "", err + } + history = append(history, h) + + if throwAway.ThrowAway { + continue + } + + layerDescriptor := &v2LayerDescriptor{ + digest: blobSum, + repoInfo: p.repoInfo, + repo: p.repo, + V2MetadataService: p.V2MetadataService, + } + + descriptors = append(descriptors, layerDescriptor) + } + + // The v1 manifest itself doesn't directly contain an OS. However, + // the history does, but unfortunately that's a string, so search through + // all the history until hopefully we find one which indicates the OS. + // supertest2014/nyan is an example of a registry image with schemav1. + configOS := runtime.GOOS + if system.LCOWSupported() { + type config struct { + Os string `json:"os,omitempty"` + } + for _, v := range verifiedManifest.History { + var c config + if err := json.Unmarshal([]byte(v.V1Compatibility), &c); err == nil { + if c.Os != "" { + configOS = c.Os + break + } + } + } + } + + // In the situation that the API call didn't specify an OS explicitly, but + // we support the operating system, switch to that operating system. + // eg FROM supertest2014/nyan with no platform specifier, and docker build + // with no --platform= flag under LCOW. + requestedOS := "" + if platform != nil { + requestedOS = platform.OS + } else if system.IsOSSupported(configOS) { + requestedOS = configOS + } + + // Early bath if the requested OS doesn't match that of the configuration. + // This avoids doing the download, only to potentially fail later. + if !strings.EqualFold(configOS, requestedOS) { + return "", "", fmt.Errorf("cannot download image with operating system %q when requesting %q", configOS, requestedOS) + } + + resultRootFS, release, err := p.config.DownloadManager.Download(ctx, *rootFS, configOS, descriptors, p.config.ProgressOutput) + if err != nil { + return "", "", err + } + defer release() + + config, err := v1.MakeConfigFromV1Config([]byte(verifiedManifest.History[0].V1Compatibility), &resultRootFS, history) + if err != nil { + return "", "", err + } + + imageID, err := p.config.ImageStore.Put(config) + if err != nil { + return "", "", err + } + + manifestDigest = digest.FromBytes(unverifiedManifest.Canonical) + + return imageID, manifestDigest, nil +} + +func (p *v2Puller) pullSchema2Layers(ctx context.Context, target distribution.Descriptor, layers []distribution.Descriptor, platform *specs.Platform) (id digest.Digest, err error) { + if _, err := p.config.ImageStore.Get(target.Digest); err == nil { + // If the image already exists locally, no need to pull + // anything. 
+		return target.Digest, nil
+	}
+
+	var descriptors []xfer.DownloadDescriptor
+
+	// Note that the order of this loop is in the direction of bottom-most
+	// to top-most, so that the downloads slice gets ordered correctly.
+	for _, d := range layers {
+		layerDescriptor := &v2LayerDescriptor{
+			digest:            d.Digest,
+			repo:              p.repo,
+			repoInfo:          p.repoInfo,
+			V2MetadataService: p.V2MetadataService,
+			src:               d,
+		}
+
+		descriptors = append(descriptors, layerDescriptor)
+	}
+
+	configChan := make(chan []byte, 1)
+	configErrChan := make(chan error, 1)
+	layerErrChan := make(chan error, 1)
+	downloadsDone := make(chan struct{})
+	var cancel func()
+	ctx, cancel = context.WithCancel(ctx)
+	defer cancel()
+
+	// Pull the image config
+	go func() {
+		configJSON, err := p.pullSchema2Config(ctx, target.Digest)
+		if err != nil {
+			configErrChan <- ImageConfigPullError{Err: err}
+			cancel()
+			return
+		}
+		configChan <- configJSON
+	}()
+
+	var (
+		configJSON       []byte          // raw serialized image config
+		downloadedRootFS *image.RootFS   // rootFS from registered layers
+		configRootFS     *image.RootFS   // rootFS from configuration
+		release          func()          // release resources from rootFS download
+		configPlatform   *specs.Platform // for LCOW when registering downloaded layers
+	)
+
+	layerStoreOS := runtime.GOOS
+	if platform != nil {
+		layerStoreOS = platform.OS
+	}
+
+	// https://github.com/docker/docker/issues/24766 - Err on the side of caution,
+	// explicitly blocking images intended for linux from the Windows daemon. On
+	// Windows, we do this before the attempt to download, effectively serialising
+	// the download and slightly slowing it down. We have to do it this way, as
+	// chances are the download of layers itself would fail due to file names
+	// which aren't suitable for NTFS. At some point in the future, if a similar
+	// check to block Windows images being pulled on Linux is implemented, it
+	// may be necessary to perform the same type of serialisation.
+	if runtime.GOOS == "windows" {
+		configJSON, configRootFS, configPlatform, err = receiveConfig(p.config.ImageStore, configChan, configErrChan)
+		if err != nil {
+			return "", err
+		}
+		if configRootFS == nil {
+			return "", errRootFSInvalid
+		}
+		if err := checkImageCompatibility(configPlatform.OS, configPlatform.OSVersion); err != nil {
+			return "", err
+		}
+
+		if len(descriptors) != len(configRootFS.DiffIDs) {
+			return "", errRootFSMismatch
+		}
+		if platform == nil {
+			// Early bath if the requested OS doesn't match that of the configuration.
+			// This avoids doing the download, only to potentially fail later.
+ if !system.IsOSSupported(configPlatform.OS) { + return "", fmt.Errorf("cannot download image with operating system %q when requesting %q", configPlatform.OS, layerStoreOS) + } + layerStoreOS = configPlatform.OS + } + + // Populate diff ids in descriptors to avoid downloading foreign layers + // which have been side loaded + for i := range descriptors { + descriptors[i].(*v2LayerDescriptor).diffID = configRootFS.DiffIDs[i] + } + } + + if p.config.DownloadManager != nil { + go func() { + var ( + err error + rootFS image.RootFS + ) + downloadRootFS := *image.NewRootFS() + rootFS, release, err = p.config.DownloadManager.Download(ctx, downloadRootFS, layerStoreOS, descriptors, p.config.ProgressOutput) + if err != nil { + // Intentionally do not cancel the config download here + // as the error from config download (if there is one) + // is more interesting than the layer download error + layerErrChan <- err + return + } + + downloadedRootFS = &rootFS + close(downloadsDone) + }() + } else { + // We have nothing to download + close(downloadsDone) + } + + if configJSON == nil { + configJSON, configRootFS, _, err = receiveConfig(p.config.ImageStore, configChan, configErrChan) + if err == nil && configRootFS == nil { + err = errRootFSInvalid + } + if err != nil { + cancel() + select { + case <-downloadsDone: + case <-layerErrChan: + } + return "", err + } + } + + select { + case <-downloadsDone: + case err = <-layerErrChan: + return "", err + } + + if release != nil { + defer release() + } + + if downloadedRootFS != nil { + // The DiffIDs returned in rootFS MUST match those in the config. + // Otherwise the image config could be referencing layers that aren't + // included in the manifest. + if len(downloadedRootFS.DiffIDs) != len(configRootFS.DiffIDs) { + return "", errRootFSMismatch + } + + for i := range downloadedRootFS.DiffIDs { + if downloadedRootFS.DiffIDs[i] != configRootFS.DiffIDs[i] { + return "", errRootFSMismatch + } + } + } + + imageID, err := p.config.ImageStore.Put(configJSON) + if err != nil { + return "", err + } + + return imageID, nil +} + +func (p *v2Puller) pullSchema2(ctx context.Context, ref reference.Named, mfst *schema2.DeserializedManifest, platform *specs.Platform) (id digest.Digest, manifestDigest digest.Digest, err error) { + manifestDigest, err = schema2ManifestDigest(ref, mfst) + if err != nil { + return "", "", err + } + id, err = p.pullSchema2Layers(ctx, mfst.Target(), mfst.Layers, platform) + return id, manifestDigest, err +} + +func (p *v2Puller) pullOCI(ctx context.Context, ref reference.Named, mfst *ocischema.DeserializedManifest, platform *specs.Platform) (id digest.Digest, manifestDigest digest.Digest, err error) { + manifestDigest, err = schema2ManifestDigest(ref, mfst) + if err != nil { + return "", "", err + } + id, err = p.pullSchema2Layers(ctx, mfst.Target(), mfst.Layers, platform) + return id, manifestDigest, err +} + +func receiveConfig(s ImageConfigStore, configChan <-chan []byte, errChan <-chan error) ([]byte, *image.RootFS, *specs.Platform, error) { + select { + case configJSON := <-configChan: + rootfs, err := s.RootFSFromConfig(configJSON) + if err != nil { + return nil, nil, nil, err + } + platform, err := s.PlatformFromConfig(configJSON) + if err != nil { + return nil, nil, nil, err + } + return configJSON, rootfs, platform, nil + case err := <-errChan: + return nil, nil, nil, err + // Don't need a case for ctx.Done in the select because cancellation + // will trigger an error in p.pullSchema2ImageConfig. 
+ } +} + +// pullManifestList handles "manifest lists" which point to various +// platform-specific manifests. +func (p *v2Puller) pullManifestList(ctx context.Context, ref reference.Named, mfstList *manifestlist.DeserializedManifestList, pp *specs.Platform) (id digest.Digest, manifestListDigest digest.Digest, err error) { + manifestListDigest, err = schema2ManifestDigest(ref, mfstList) + if err != nil { + return "", "", err + } + + var platform specs.Platform + if pp != nil { + platform = *pp + } + logrus.Debugf("%s resolved to a manifestList object with %d entries; looking for a %s/%s match", ref, len(mfstList.Manifests), platforms.Format(platform), runtime.GOARCH) + + manifestMatches := filterManifests(mfstList.Manifests, platform) + + if len(manifestMatches) == 0 { + errMsg := fmt.Sprintf("no matching manifest for %s in the manifest list entries", formatPlatform(platform)) + logrus.Debugf(errMsg) + return "", "", errors.New(errMsg) + } + + if len(manifestMatches) > 1 { + logrus.Debugf("found multiple matches in manifest list, choosing best match %s", manifestMatches[0].Digest.String()) + } + manifestDigest := manifestMatches[0].Digest + + if err := checkImageCompatibility(manifestMatches[0].Platform.OS, manifestMatches[0].Platform.OSVersion); err != nil { + return "", "", err + } + + manSvc, err := p.repo.Manifests(ctx) + if err != nil { + return "", "", err + } + + manifest, err := manSvc.Get(ctx, manifestDigest) + if err != nil { + return "", "", err + } + + manifestRef, err := reference.WithDigest(reference.TrimNamed(ref), manifestDigest) + if err != nil { + return "", "", err + } + + switch v := manifest.(type) { + case *schema1.SignedManifest: + msg := fmt.Sprintf("[DEPRECATION NOTICE] v2 schema1 manifests in manifest lists are not supported and will break in a future release. Suggest author of %s to upgrade to v2 schema2. More information at https://docs.docker.com/registry/spec/deprecated-schema-v1/", ref) + logrus.Warn(msg) + progress.Message(p.config.ProgressOutput, "", msg) + + platform := toOCIPlatform(manifestMatches[0].Platform) + id, _, err = p.pullSchema1(ctx, manifestRef, v, &platform) + if err != nil { + return "", "", err + } + case *schema2.DeserializedManifest: + platform := toOCIPlatform(manifestMatches[0].Platform) + id, _, err = p.pullSchema2(ctx, manifestRef, v, &platform) + if err != nil { + return "", "", err + } + case *ocischema.DeserializedManifest: + platform := toOCIPlatform(manifestMatches[0].Platform) + id, _, err = p.pullOCI(ctx, manifestRef, v, &platform) + if err != nil { + return "", "", err + } + default: + return "", "", errors.New("unsupported manifest format") + } + + return id, manifestListDigest, err +} + +func (p *v2Puller) pullSchema2Config(ctx context.Context, dgst digest.Digest) (configJSON []byte, err error) { + blobs := p.repo.Blobs(ctx) + configJSON, err = blobs.Get(ctx, dgst) + if err != nil { + return nil, err + } + + // Verify image config digest + verifier := dgst.Verifier() + if _, err := verifier.Write(configJSON); err != nil { + return nil, err + } + if !verifier.Verified() { + err := fmt.Errorf("image config verification failed for digest %s", dgst) + logrus.Error(err) + return nil, err + } + + return configJSON, nil +} + +// schema2ManifestDigest computes the manifest digest, and, if pulling by +// digest, ensures that it matches the requested digest. 
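+//
+// Editorial sketch (not upstream code): the go-digest verification pattern
+// used below boils down to
+//
+//	verifier := dgst.Verifier()
+//	if _, err := verifier.Write(payload); err != nil {
+//		return "", err
+//	}
+//	if !verifier.Verified() {
+//		// payload does not hash to dgst
+//	}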
+func schema2ManifestDigest(ref reference.Named, mfst distribution.Manifest) (digest.Digest, error) { + _, canonical, err := mfst.Payload() + if err != nil { + return "", err + } + + // If pull by digest, then verify the manifest digest. + if digested, isDigested := ref.(reference.Canonical); isDigested { + verifier := digested.Digest().Verifier() + if _, err := verifier.Write(canonical); err != nil { + return "", err + } + if !verifier.Verified() { + err := fmt.Errorf("manifest verification failed for digest %s", digested.Digest()) + logrus.Error(err) + return "", err + } + return digested.Digest(), nil + } + + return digest.FromBytes(canonical), nil +} + +// allowV1Fallback checks if the error is a possible reason to fallback to v1 +// (even if confirmedV2 has been set already), and if so, wraps the error in +// a fallbackError with confirmedV2 set to false. Otherwise, it returns the +// error unmodified. +func allowV1Fallback(err error) error { + switch v := err.(type) { + case errcode.Errors: + if len(v) != 0 { + if v0, ok := v[0].(errcode.Error); ok && shouldV2Fallback(v0) { + return fallbackError{ + err: err, + confirmedV2: false, + transportOK: true, + } + } + } + case errcode.Error: + if shouldV2Fallback(v) { + return fallbackError{ + err: err, + confirmedV2: false, + transportOK: true, + } + } + case *url.Error: + if v.Err == auth.ErrNoBasicAuthCredentials { + return fallbackError{err: err, confirmedV2: false} + } + } + + return err +} + +func verifySchema1Manifest(signedManifest *schema1.SignedManifest, ref reference.Reference) (m *schema1.Manifest, err error) { + // If pull by digest, then verify the manifest digest. NOTE: It is + // important to do this first, before any other content validation. If the + // digest cannot be verified, don't even bother with those other things. + if digested, isCanonical := ref.(reference.Canonical); isCanonical { + verifier := digested.Digest().Verifier() + if _, err := verifier.Write(signedManifest.Canonical); err != nil { + return nil, err + } + if !verifier.Verified() { + err := fmt.Errorf("image verification failed for digest %s", digested.Digest()) + logrus.Error(err) + return nil, err + } + } + m = &signedManifest.Manifest + + if m.SchemaVersion != 1 { + return nil, fmt.Errorf("unsupported schema version %d for %q", m.SchemaVersion, reference.FamiliarString(ref)) + } + if len(m.FSLayers) != len(m.History) { + return nil, fmt.Errorf("length of history not equal to number of layers for %q", reference.FamiliarString(ref)) + } + if len(m.FSLayers) == 0 { + return nil, fmt.Errorf("no FSLayers in manifest for %q", reference.FamiliarString(ref)) + } + return m, nil +} + +// fixManifestLayers removes repeated layers from the manifest and checks the +// correctness of the parent chain. +func fixManifestLayers(m *schema1.Manifest) error { + imgs := make([]*image.V1Image, len(m.FSLayers)) + for i := range m.FSLayers { + img := &image.V1Image{} + + if err := json.Unmarshal([]byte(m.History[i].V1Compatibility), img); err != nil { + return err + } + + imgs[i] = img + if err := v1.ValidateID(img.ID); err != nil { + return err + } + } + + if imgs[len(imgs)-1].Parent != "" && runtime.GOOS != "windows" { + // Windows base layer can point to a base layer parent that is not in manifest. 
+ return errors.New("invalid parent ID in the base layer of the image") + } + + // check general duplicates to error instead of a deadlock + idmap := make(map[string]struct{}) + + var lastID string + for _, img := range imgs { + // skip IDs that appear after each other, we handle those later + if _, exists := idmap[img.ID]; img.ID != lastID && exists { + return fmt.Errorf("ID %+v appears multiple times in manifest", img.ID) + } + lastID = img.ID + idmap[lastID] = struct{}{} + } + + // backwards loop so that we keep the remaining indexes after removing items + for i := len(imgs) - 2; i >= 0; i-- { + if imgs[i].ID == imgs[i+1].ID { // repeated ID. remove and continue + m.FSLayers = append(m.FSLayers[:i], m.FSLayers[i+1:]...) + m.History = append(m.History[:i], m.History[i+1:]...) + } else if imgs[i].Parent != imgs[i+1].ID { + return fmt.Errorf("invalid parent ID. Expected %v, got %v", imgs[i+1].ID, imgs[i].Parent) + } + } + + return nil +} + +func createDownloadFile() (*os.File, error) { + return ioutil.TempFile("", "GetImageBlob") +} + +func toOCIPlatform(p manifestlist.PlatformSpec) specs.Platform { + return specs.Platform{ + OS: p.OS, + Architecture: p.Architecture, + Variant: p.Variant, + OSFeatures: p.OSFeatures, + OSVersion: p.OSVersion, + } +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/distribution/pull_v2_unix.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/distribution/pull_v2_unix.go new file mode 100644 index 000000000..fea1eb6e6 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/distribution/pull_v2_unix.go @@ -0,0 +1,67 @@ +// +build !windows + +package distribution // import "github.com/docker/docker/distribution" + +import ( + "context" + + "github.com/containerd/containerd/platforms" + "github.com/docker/distribution" + "github.com/docker/distribution/manifest/manifestlist" + specs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/sirupsen/logrus" +) + +func (ld *v2LayerDescriptor) open(ctx context.Context) (distribution.ReadSeekCloser, error) { + blobs := ld.repo.Blobs(ctx) + return blobs.Open(ctx, ld.digest) +} + +func filterManifests(manifests []manifestlist.ManifestDescriptor, p specs.Platform) []manifestlist.ManifestDescriptor { + p = platforms.Normalize(withDefault(p)) + m := platforms.NewMatcher(p) + var matches []manifestlist.ManifestDescriptor + for _, desc := range manifests { + if m.Match(toOCIPlatform(desc.Platform)) { + matches = append(matches, desc) + logrus.Debugf("found match for %s with media type %s, digest %s", platforms.Format(p), desc.MediaType, desc.Digest.String()) + } + } + + // deprecated: backwards compatibility with older versions that didn't compare variant + if len(matches) == 0 && p.Architecture == "arm" { + p = platforms.Normalize(p) + for _, desc := range manifests { + if desc.Platform.OS == p.OS && desc.Platform.Architecture == p.Architecture { + matches = append(matches, desc) + logrus.Debugf("found deprecated partial match for %s with media type %s, digest %s", platforms.Format(p), desc.MediaType, desc.Digest.String()) + } + } + } + + return matches +} + +// checkImageCompatibility is a Windows-specific function. 
No-op on Linux +func checkImageCompatibility(imageOS, imageOSVersion string) error { + return nil +} + +func withDefault(p specs.Platform) specs.Platform { + def := platforms.DefaultSpec() + if p.OS == "" { + p.OS = def.OS + } + if p.Architecture == "" { + p.Architecture = def.Architecture + p.Variant = def.Variant + } + return p +} + +func formatPlatform(platform specs.Platform) string { + if platform.OS == "" { + platform = platforms.DefaultSpec() + } + return platforms.Format(platform) +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/distribution/pull_v2_windows.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/distribution/pull_v2_windows.go new file mode 100644 index 000000000..1162e9a8b --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/distribution/pull_v2_windows.go @@ -0,0 +1,146 @@ +package distribution // import "github.com/docker/docker/distribution" + +import ( + "context" + "errors" + "fmt" + "net/http" + "os" + "runtime" + "sort" + "strconv" + "strings" + + "github.com/containerd/containerd/platforms" + "github.com/docker/distribution" + "github.com/docker/distribution/manifest/manifestlist" + "github.com/docker/distribution/manifest/schema2" + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/docker/pkg/system" + specs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/sirupsen/logrus" +) + +var _ distribution.Describable = &v2LayerDescriptor{} + +func (ld *v2LayerDescriptor) Descriptor() distribution.Descriptor { + if ld.src.MediaType == schema2.MediaTypeForeignLayer && len(ld.src.URLs) > 0 { + return ld.src + } + return distribution.Descriptor{} +} + +func (ld *v2LayerDescriptor) open(ctx context.Context) (distribution.ReadSeekCloser, error) { + blobs := ld.repo.Blobs(ctx) + rsc, err := blobs.Open(ctx, ld.digest) + + if len(ld.src.URLs) == 0 { + return rsc, err + } + + // We're done if the registry has this blob. + if err == nil { + // Seek does an HTTP GET. If it succeeds, the blob really is accessible. + if _, err = rsc.Seek(0, os.SEEK_SET); err == nil { + return rsc, nil + } + rsc.Close() + } + + // Find the first URL that results in a 200 result code. + for _, url := range ld.src.URLs { + logrus.Debugf("Pulling %v from foreign URL %v", ld.digest, url) + rsc = transport.NewHTTPReadSeeker(http.DefaultClient, url, nil) + + // Seek does an HTTP GET. If it succeeds, the blob really is accessible. 
+ _, err = rsc.Seek(0, os.SEEK_SET) + if err == nil { + break + } + logrus.Debugf("Download for %v failed: %v", ld.digest, err) + rsc.Close() + rsc = nil + } + return rsc, err +} + +func filterManifests(manifests []manifestlist.ManifestDescriptor, p specs.Platform) []manifestlist.ManifestDescriptor { + version := system.GetOSVersion() + osVersion := fmt.Sprintf("%d.%d.%d", version.MajorVersion, version.MinorVersion, version.Build) + logrus.Debugf("will prefer Windows entries with version %s", osVersion) + + var matches []manifestlist.ManifestDescriptor + foundWindowsMatch := false + for _, manifestDescriptor := range manifests { + if (manifestDescriptor.Platform.Architecture == runtime.GOARCH) && + ((p.OS != "" && manifestDescriptor.Platform.OS == p.OS) || // Explicit user request for an OS we know we support + (p.OS == "" && system.IsOSSupported(manifestDescriptor.Platform.OS))) { // No user requested OS, but one we can support + if strings.EqualFold("windows", manifestDescriptor.Platform.OS) { + if err := checkImageCompatibility("windows", manifestDescriptor.Platform.OSVersion); err != nil { + continue + } + foundWindowsMatch = true + } + matches = append(matches, manifestDescriptor) + logrus.Debugf("found match %s/%s %s with media type %s, digest %s", manifestDescriptor.Platform.OS, runtime.GOARCH, manifestDescriptor.Platform.OSVersion, manifestDescriptor.MediaType, manifestDescriptor.Digest.String()) + } else { + logrus.Debugf("ignoring %s/%s %s with media type %s, digest %s", manifestDescriptor.Platform.OS, manifestDescriptor.Platform.Architecture, manifestDescriptor.Platform.OSVersion, manifestDescriptor.MediaType, manifestDescriptor.Digest.String()) + } + } + if foundWindowsMatch { + sort.Stable(manifestsByVersion{osVersion, matches}) + } + return matches +} + +func versionMatch(actual, expected string) bool { + // Check whether the version matches up to the build, ignoring UBR + return strings.HasPrefix(actual, expected+".") +} + +type manifestsByVersion struct { + version string + list []manifestlist.ManifestDescriptor +} + +func (mbv manifestsByVersion) Less(i, j int) bool { + // TODO: Split version by parts and compare + // TODO: Prefer versions which have a greater version number + // Move compatible versions to the top, with no other ordering changes + return (strings.EqualFold("windows", mbv.list[i].Platform.OS) && !strings.EqualFold("windows", mbv.list[j].Platform.OS)) || + (versionMatch(mbv.list[i].Platform.OSVersion, mbv.version) && !versionMatch(mbv.list[j].Platform.OSVersion, mbv.version)) +} + +func (mbv manifestsByVersion) Len() int { + return len(mbv.list) +} + +func (mbv manifestsByVersion) Swap(i, j int) { + mbv.list[i], mbv.list[j] = mbv.list[j], mbv.list[i] +} + +// checkImageCompatibility blocks pulling incompatible images based on a later OS build +// Fixes https://github.com/moby/moby/issues/36184. 
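+//
+// Editorial sketch (not upstream code): only the build number, the third
+// field of a version such as "10.0.16299.nnnn", takes part in the comparison:
+//
+//	parts := strings.Split("10.0.16299.125", ".") // ["10", "0", "16299", "125"]
+//	build, err := strconv.Atoi(parts[2])          // 16299
+//	// the pull is refused when err == nil and build > int(hostOSV.Build)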
+func checkImageCompatibility(imageOS, imageOSVersion string) error {
+	if imageOS == "windows" {
+		hostOSV := system.GetOSVersion()
+		splitImageOSVersion := strings.Split(imageOSVersion, ".") // eg 10.0.16299.nnnn
+		if len(splitImageOSVersion) >= 3 {
+			if imageOSBuild, err := strconv.Atoi(splitImageOSVersion[2]); err == nil {
+				if imageOSBuild > int(hostOSV.Build) {
+					errMsg := fmt.Sprintf("a Windows version %s.%s.%s-based image is incompatible with a %s host", splitImageOSVersion[0], splitImageOSVersion[1], splitImageOSVersion[2], hostOSV.ToString())
+					logrus.Debugf(errMsg)
+					return errors.New(errMsg)
+				}
+			}
+		}
+	}
+	return nil
+}
+
+func formatPlatform(platform specs.Platform) string {
+	if platform.OS == "" {
+		platform = platforms.DefaultSpec()
+	}
+	return fmt.Sprintf("%s %s", platforms.Format(platform), system.GetOSVersion().ToString())
+}
diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/distribution/push.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/distribution/push.go
new file mode 100644
index 000000000..5617a4c95
--- /dev/null
+++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/distribution/push.go
@@ -0,0 +1,180 @@
+package distribution // import "github.com/docker/docker/distribution"
+
+import (
+	"bufio"
+	"compress/gzip"
+	"context"
+	"fmt"
+	"io"
+
+	"github.com/docker/distribution/reference"
+	"github.com/docker/docker/distribution/metadata"
+	"github.com/docker/docker/pkg/progress"
+	"github.com/docker/docker/registry"
+	"github.com/sirupsen/logrus"
+)
+
+// Pusher is an interface that abstracts pushing for different API versions.
+type Pusher interface {
+	// Push tries to push the image configured at the creation of Pusher.
+	// Push returns an error; a fallbackError signals that the caller may retry
+	// the push on the next configured endpoint.
+	//
+	// TODO(tiborvass): have Push() take a reference to repository + tag, so that the pusher itself is repository-agnostic.
+	Push(ctx context.Context) error
+}
+
+const compressionBufSize = 32768
+
+// NewPusher creates a Pusher for the given endpoint. The endpoint argument
+// contains a Version field; only registry API version 2 is supported, and
+// requesting a v1 endpoint returns an error. The other parameters are passed
+// through to the underlying pusher implementation for use during the actual
+// push operation.
+func NewPusher(ref reference.Named, endpoint registry.APIEndpoint, repoInfo *registry.RepositoryInfo, imagePushConfig *ImagePushConfig) (Pusher, error) {
+	switch endpoint.Version {
+	case registry.APIVersion2:
+		return &v2Pusher{
+			v2MetadataService: metadata.NewV2MetadataService(imagePushConfig.MetadataStore),
+			ref:               ref,
+			endpoint:          endpoint,
+			repoInfo:          repoInfo,
+			config:            imagePushConfig,
+		}, nil
+	case registry.APIVersion1:
+		return nil, fmt.Errorf("protocol version %d no longer supported. Please contact admins of registry %s", endpoint.Version, endpoint.URL)
+	}
+	return nil, fmt.Errorf("unknown version %d for registry %s", endpoint.Version, endpoint.URL)
+}
+
+// Push initiates a push operation on ref.
+// ref is the specific variant of the image to be pushed.
+// If no tag is provided, all tags will be pushed.
+func Push(ctx context.Context, ref reference.Named, imagePushConfig *ImagePushConfig) error {
+	// FIXME: Allow to interrupt current push when new push of same image is done.
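+	// Editorial note (not upstream code): a caller drives Push the same way as
+	// Pull; with a hypothetical, already populated *ImagePushConfig pushCfg:
+	//
+	//	ref, err := reference.ParseNormalizedNamed("library/alpine:3.11")
+	//	if err != nil {
+	//		return err
+	//	}
+	//	return Push(ctx, ref, pushCfg)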
+ + // Resolve the Repository name from fqn to RepositoryInfo + repoInfo, err := imagePushConfig.RegistryService.ResolveRepository(ref) + if err != nil { + return err + } + + endpoints, err := imagePushConfig.RegistryService.LookupPushEndpoints(reference.Domain(repoInfo.Name)) + if err != nil { + return err + } + + progress.Messagef(imagePushConfig.ProgressOutput, "", "The push refers to repository [%s]", repoInfo.Name.Name()) + + associations := imagePushConfig.ReferenceStore.ReferencesByName(repoInfo.Name) + if len(associations) == 0 { + return fmt.Errorf("An image does not exist locally with the tag: %s", reference.FamiliarName(repoInfo.Name)) + } + + var ( + lastErr error + + // confirmedV2 is set to true if a push attempt managed to + // confirm that it was talking to a v2 registry. This will + // prevent fallback to the v1 protocol. + confirmedV2 bool + + // confirmedTLSRegistries is a map indicating which registries + // are known to be using TLS. There should never be a plaintext + // retry for any of these. + confirmedTLSRegistries = make(map[string]struct{}) + ) + + for _, endpoint := range endpoints { + if imagePushConfig.RequireSchema2 && endpoint.Version == registry.APIVersion1 { + continue + } + if confirmedV2 && endpoint.Version == registry.APIVersion1 { + logrus.Debugf("Skipping v1 endpoint %s because v2 registry was detected", endpoint.URL) + continue + } + + if endpoint.URL.Scheme != "https" { + if _, confirmedTLS := confirmedTLSRegistries[endpoint.URL.Host]; confirmedTLS { + logrus.Debugf("Skipping non-TLS endpoint %s for host/port that appears to use TLS", endpoint.URL) + continue + } + } + + logrus.Debugf("Trying to push %s to %s %s", repoInfo.Name.Name(), endpoint.URL, endpoint.Version) + + pusher, err := NewPusher(ref, endpoint, repoInfo, imagePushConfig) + if err != nil { + lastErr = err + continue + } + if err := pusher.Push(ctx); err != nil { + // Was this push cancelled? If so, don't try to fall + // back. + select { + case <-ctx.Done(): + default: + if fallbackErr, ok := err.(fallbackError); ok { + confirmedV2 = confirmedV2 || fallbackErr.confirmedV2 + if fallbackErr.transportOK && endpoint.URL.Scheme == "https" { + confirmedTLSRegistries[endpoint.URL.Host] = struct{}{} + } + err = fallbackErr.err + lastErr = err + logrus.Infof("Attempting next endpoint for push after error: %v", err) + continue + } + } + + logrus.Errorf("Not continuing with push after error: %v", err) + return err + } + + imagePushConfig.ImageEventLogger(reference.FamiliarString(ref), reference.FamiliarName(repoInfo.Name), "push") + return nil + } + + if lastErr == nil { + lastErr = fmt.Errorf("no endpoints found for %s", repoInfo.Name.Name()) + } + return lastErr +} + +// compress returns an io.ReadCloser which will supply a compressed version of +// the provided Reader. The caller must close the ReadCloser after reading the +// compressed data. +// +// Note that this function returns a reader instead of taking a writer as an +// argument so that it can be used with httpBlobWriter's ReadFrom method. +// Using httpBlobWriter's Write method would send a PATCH request for every +// Write call. +// +// The second return value is a channel that gets closed when the goroutine +// is finished. This allows the caller to make sure the goroutine finishes +// before it releases any resources connected with the reader that was +// passed in. 
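+//
+// Editorial sketch (not upstream code): a typical caller pairs the returned
+// reader with the done channel, so that the input is not released while the
+// compression goroutine may still be reading from it:
+//
+//	compressed, done := compress(layerData)
+//	defer func() {
+//		compressed.Close()
+//		<-done
+//	}()
+//	// ... stream the gzipped bytes from compressed ...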
+func compress(in io.Reader) (io.ReadCloser, chan struct{}) { + compressionDone := make(chan struct{}) + + pipeReader, pipeWriter := io.Pipe() + // Use a bufio.Writer to avoid excessive chunking in HTTP request. + bufWriter := bufio.NewWriterSize(pipeWriter, compressionBufSize) + compressor := gzip.NewWriter(bufWriter) + + go func() { + _, err := io.Copy(compressor, in) + if err == nil { + err = compressor.Close() + } + if err == nil { + err = bufWriter.Flush() + } + if err != nil { + pipeWriter.CloseWithError(err) + } else { + pipeWriter.Close() + } + close(compressionDone) + }() + + return pipeReader, compressionDone +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/distribution/push_v2.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/distribution/push_v2.go new file mode 100644 index 000000000..06863ad0d --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/distribution/push_v2.go @@ -0,0 +1,712 @@ +package distribution // import "github.com/docker/docker/distribution" + +import ( + "context" + "errors" + "fmt" + "io" + "runtime" + "sort" + "strings" + "sync" + + "github.com/docker/distribution" + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/manifest/schema2" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/distribution/registry/client" + apitypes "github.com/docker/docker/api/types" + "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/distribution/xfer" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/registry" + "github.com/opencontainers/go-digest" + "github.com/sirupsen/logrus" +) + +const ( + smallLayerMaximumSize = 100 * (1 << 10) // 100KB + middleLayerMaximumSize = 10 * (1 << 20) // 10MB +) + +type v2Pusher struct { + v2MetadataService metadata.V2MetadataService + ref reference.Named + endpoint registry.APIEndpoint + repoInfo *registry.RepositoryInfo + config *ImagePushConfig + repo distribution.Repository + + // pushState is state built by the Upload functions. + pushState pushState +} + +type pushState struct { + sync.Mutex + // remoteLayers is the set of layers known to exist on the remote side. + // This avoids redundant queries when pushing multiple tags that + // involve the same layers. It is also used to fill in digest and size + // information when building the manifest. + remoteLayers map[layer.DiffID]distribution.Descriptor + // confirmedV2 is set to true if we confirm we're talking to a v2 + // registry. This is used to limit fallbacks to the v1 protocol. 
+	confirmedV2 bool
+	hasAuthInfo bool
+}
+
+func (p *v2Pusher) Push(ctx context.Context) (err error) {
+	p.pushState.remoteLayers = make(map[layer.DiffID]distribution.Descriptor)
+
+	p.repo, p.pushState.confirmedV2, err = NewV2Repository(ctx, p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "push", "pull")
+	p.pushState.hasAuthInfo = p.config.AuthConfig.RegistryToken != "" || (p.config.AuthConfig.Username != "" && p.config.AuthConfig.Password != "")
+	if err != nil {
+		logrus.Debugf("Error getting v2 registry: %v", err)
+		return err
+	}
+
+	if err = p.pushV2Repository(ctx); err != nil {
+		if continueOnError(err, p.endpoint.Mirror) {
+			return fallbackError{
+				err:         err,
+				confirmedV2: p.pushState.confirmedV2,
+				transportOK: true,
+			}
+		}
+	}
+	return err
+}
+
+func (p *v2Pusher) pushV2Repository(ctx context.Context) (err error) {
+	if namedTagged, isNamedTagged := p.ref.(reference.NamedTagged); isNamedTagged {
+		imageID, err := p.config.ReferenceStore.Get(p.ref)
+		if err != nil {
+			return fmt.Errorf("tag does not exist: %s", reference.FamiliarString(p.ref))
+		}
+
+		return p.pushV2Tag(ctx, namedTagged, imageID)
+	}
+
+	if !reference.IsNameOnly(p.ref) {
+		return errors.New("cannot push a digest reference")
+	}
+
+	// Push all tags
+	pushed := 0
+	for _, association := range p.config.ReferenceStore.ReferencesByName(p.ref) {
+		if namedTagged, isNamedTagged := association.Ref.(reference.NamedTagged); isNamedTagged {
+			pushed++
+			if err := p.pushV2Tag(ctx, namedTagged, association.ID); err != nil {
+				return err
+			}
+		}
+	}
+
+	if pushed == 0 {
+		return fmt.Errorf("no tags to push for %s", reference.FamiliarName(p.repoInfo.Name))
+	}
+
+	return nil
+}
+
+func (p *v2Pusher) pushV2Tag(ctx context.Context, ref reference.NamedTagged, id digest.Digest) error {
+	logrus.Debugf("Pushing repository: %s", reference.FamiliarString(ref))
+
+	imgConfig, err := p.config.ImageStore.Get(id)
+	if err != nil {
+		return fmt.Errorf("could not find image from tag %s: %v", reference.FamiliarString(ref), err)
+	}
+
+	rootfs, err := p.config.ImageStore.RootFSFromConfig(imgConfig)
+	if err != nil {
+		return fmt.Errorf("unable to get rootfs for image %s: %s", reference.FamiliarString(ref), err)
+	}
+
+	platform, err := p.config.ImageStore.PlatformFromConfig(imgConfig)
+	if err != nil {
+		return fmt.Errorf("unable to get platform for image %s: %s", reference.FamiliarString(ref), err)
+	}
+
+	l, err := p.config.LayerStores[platform.OS].Get(rootfs.ChainID())
+	if err != nil {
+		return fmt.Errorf("failed to get top layer from image: %v", err)
+	}
+	defer l.Release()
+
+	hmacKey, err := metadata.ComputeV2MetadataHMACKey(p.config.AuthConfig)
+	if err != nil {
+		return fmt.Errorf("failed to compute hmac key of auth config: %v", err)
+	}
+
+	var descriptors []xfer.UploadDescriptor
+
+	descriptorTemplate := v2PushDescriptor{
+		v2MetadataService: p.v2MetadataService,
+		hmacKey:           hmacKey,
+		repoInfo:          p.repoInfo.Name,
+		ref:               p.ref,
+		endpoint:          p.endpoint,
+		repo:              p.repo,
+		pushState:         &p.pushState,
+	}
+
+	// Loop bounds condition is to avoid pushing the base layer on Windows.
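+	// Editorial note (not upstream code): each iteration captures the current
+	// layer and then steps to its parent, so descriptors ends up ordered from
+	// the top-most layer down to the base:
+	//
+	//	l (top) -> l.Parent() -> ... -> base
+	//
+	// manifestFromBuilder later walks the slice in reverse to append
+	// references bottom-up.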
+ for range rootfs.DiffIDs { + descriptor := descriptorTemplate + descriptor.layer = l + descriptor.checkedDigests = make(map[digest.Digest]struct{}) + descriptors = append(descriptors, &descriptor) + + l = l.Parent() + } + + if err := p.config.UploadManager.Upload(ctx, descriptors, p.config.ProgressOutput); err != nil { + return err + } + + // Try schema2 first + builder := schema2.NewManifestBuilder(p.repo.Blobs(ctx), p.config.ConfigMediaType, imgConfig) + manifest, err := manifestFromBuilder(ctx, builder, descriptors) + if err != nil { + return err + } + + manSvc, err := p.repo.Manifests(ctx) + if err != nil { + return err + } + + putOptions := []distribution.ManifestServiceOption{distribution.WithTag(ref.Tag())} + if _, err = manSvc.Put(ctx, manifest, putOptions...); err != nil { + if runtime.GOOS == "windows" || p.config.TrustKey == nil || p.config.RequireSchema2 { + logrus.Warnf("failed to upload schema2 manifest: %v", err) + return err + } + + logrus.Warnf("failed to upload schema2 manifest: %v - falling back to schema1", err) + + msg := fmt.Sprintf("[DEPRECATION NOTICE] registry v2 schema1 support will be removed in an upcoming release. Please contact admins of the %s registry NOW to avoid future disruption. More information at https://docs.docker.com/registry/spec/deprecated-schema-v1/", reference.Domain(ref)) + logrus.Warn(msg) + progress.Message(p.config.ProgressOutput, "", msg) + + manifestRef, err := reference.WithTag(p.repo.Named(), ref.Tag()) + if err != nil { + return err + } + builder = schema1.NewConfigManifestBuilder(p.repo.Blobs(ctx), p.config.TrustKey, manifestRef, imgConfig) + manifest, err = manifestFromBuilder(ctx, builder, descriptors) + if err != nil { + return err + } + + if _, err = manSvc.Put(ctx, manifest, putOptions...); err != nil { + return err + } + } + + var canonicalManifest []byte + + switch v := manifest.(type) { + case *schema1.SignedManifest: + canonicalManifest = v.Canonical + case *schema2.DeserializedManifest: + _, canonicalManifest, err = v.Payload() + if err != nil { + return err + } + } + + manifestDigest := digest.FromBytes(canonicalManifest) + progress.Messagef(p.config.ProgressOutput, "", "%s: digest: %s size: %d", ref.Tag(), manifestDigest, len(canonicalManifest)) + + if err := addDigestReference(p.config.ReferenceStore, ref, manifestDigest, id); err != nil { + return err + } + + // Signal digest to the trust client so it can sign the + // push, if appropriate. + progress.Aux(p.config.ProgressOutput, apitypes.PushResult{Tag: ref.Tag(), Digest: manifestDigest.String(), Size: len(canonicalManifest)}) + + return nil +} + +func manifestFromBuilder(ctx context.Context, builder distribution.ManifestBuilder, descriptors []xfer.UploadDescriptor) (distribution.Manifest, error) { + // descriptors is in reverse order; iterate backwards to get references + // appended in the right order. 
+ for i := len(descriptors) - 1; i >= 0; i-- { + if err := builder.AppendReference(descriptors[i].(*v2PushDescriptor)); err != nil { + return nil, err + } + } + + return builder.Build(ctx) +} + +type v2PushDescriptor struct { + layer PushLayer + v2MetadataService metadata.V2MetadataService + hmacKey []byte + repoInfo reference.Named + ref reference.Named + endpoint registry.APIEndpoint + repo distribution.Repository + pushState *pushState + remoteDescriptor distribution.Descriptor + // a set of digests whose presence has been checked in a target repository + checkedDigests map[digest.Digest]struct{} +} + +func (pd *v2PushDescriptor) Key() string { + return "v2push:" + pd.ref.Name() + " " + pd.layer.DiffID().String() +} + +func (pd *v2PushDescriptor) ID() string { + return stringid.TruncateID(pd.layer.DiffID().String()) +} + +func (pd *v2PushDescriptor) DiffID() layer.DiffID { + return pd.layer.DiffID() +} + +func (pd *v2PushDescriptor) Upload(ctx context.Context, progressOutput progress.Output) (distribution.Descriptor, error) { + // Skip foreign layers unless this registry allows nondistributable artifacts. + if !pd.endpoint.AllowNondistributableArtifacts { + if fs, ok := pd.layer.(distribution.Describable); ok { + if d := fs.Descriptor(); len(d.URLs) > 0 { + progress.Update(progressOutput, pd.ID(), "Skipped foreign layer") + return d, nil + } + } + } + + diffID := pd.DiffID() + + pd.pushState.Lock() + if descriptor, ok := pd.pushState.remoteLayers[diffID]; ok { + // it is already known that the push is not needed and + // therefore doing a stat is unnecessary + pd.pushState.Unlock() + progress.Update(progressOutput, pd.ID(), "Layer already exists") + return descriptor, nil + } + pd.pushState.Unlock() + + maxMountAttempts, maxExistenceChecks, checkOtherRepositories := getMaxMountAndExistenceCheckAttempts(pd.layer) + + // Do we have any metadata associated with this layer's DiffID? + v2Metadata, err := pd.v2MetadataService.GetMetadata(diffID) + if err == nil { + // check for blob existence in the target repository + descriptor, exists, err := pd.layerAlreadyExists(ctx, progressOutput, diffID, true, 1, v2Metadata) + if exists || err != nil { + return descriptor, err + } + } + + // if digest was empty or not saved, or if blob does not exist on the remote repository, + // then push the blob. 
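+	// The fallback chain below runs in order of increasing cost: first try a
+	// cross-repository mount from another repository on the same registry,
+	// then stat any remaining digests known for this DiffID, and only then
+	// upload the blob data itself.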
+	bs := pd.repo.Blobs(ctx)
+
+	var layerUpload distribution.BlobWriter
+
+	// Attempt to find another repository in the same registry to mount the layer from to avoid an unnecessary upload
+	candidates := getRepositoryMountCandidates(pd.repoInfo, pd.hmacKey, maxMountAttempts, v2Metadata)
+	isUnauthorizedError := false
+	for _, mountCandidate := range candidates {
+		logrus.Debugf("attempting to mount layer %s (%s) from %s", diffID, mountCandidate.Digest, mountCandidate.SourceRepository)
+		createOpts := []distribution.BlobCreateOption{}
+
+		if len(mountCandidate.SourceRepository) > 0 {
+			namedRef, err := reference.ParseNormalizedNamed(mountCandidate.SourceRepository)
+			if err != nil {
+				logrus.Errorf("failed to parse source repository reference %v: %v", reference.FamiliarString(namedRef), err)
+				pd.v2MetadataService.Remove(mountCandidate)
+				continue
+			}
+
+			// Candidates are always under the same domain; create a remote
+			// reference with only the path, to mount from.
+			remoteRef, err := reference.WithName(reference.Path(namedRef))
+			if err != nil {
+				logrus.Errorf("failed to make remote reference out of %q: %v", reference.Path(namedRef), err)
+				continue
+			}
+
+			canonicalRef, err := reference.WithDigest(reference.TrimNamed(remoteRef), mountCandidate.Digest)
+			if err != nil {
+				logrus.Errorf("failed to make canonical reference: %v", err)
+				continue
+			}
+
+			createOpts = append(createOpts, client.WithMountFrom(canonicalRef))
+		}
+
+		// send the layer
+		lu, err := bs.Create(ctx, createOpts...)
+		switch err := err.(type) {
+		case nil:
+			// noop
+		case distribution.ErrBlobMounted:
+			progress.Updatef(progressOutput, pd.ID(), "Mounted from %s", err.From.Name())
+
+			err.Descriptor.MediaType = schema2.MediaTypeLayer
+
+			pd.pushState.Lock()
+			pd.pushState.confirmedV2 = true
+			pd.pushState.remoteLayers[diffID] = err.Descriptor
+			pd.pushState.Unlock()
+
+			// Cache mapping from this layer's DiffID to the blobsum
+			if err := pd.v2MetadataService.TagAndAdd(diffID, pd.hmacKey, metadata.V2Metadata{
+				Digest:           err.Descriptor.Digest,
+				SourceRepository: pd.repoInfo.Name(),
+			}); err != nil {
+				return distribution.Descriptor{}, xfer.DoNotRetry{Err: err}
+			}
+			return err.Descriptor, nil
+		case errcode.Errors:
+			for _, e := range err {
+				switch e := e.(type) {
+				case errcode.Error:
+					if e.Code == errcode.ErrorCodeUnauthorized {
+						// an unauthorized error indicates that the user does
+						// not have permission to push the layer to this registry
+						logrus.Debugln("failed to push layer to registry because unauthorized error")
+						isUnauthorizedError = true
+					}
+				default:
+				}
+			}
+		default:
+			logrus.Infof("failed to mount layer %s (%s) from %s: %v", diffID, mountCandidate.Digest, mountCandidate.SourceRepository, err)
+		}
+
+		// If the mount failed with an unauthorized error and the user has no
+		// auth info, the user lacks push rights to this registry and is not
+		// logged in either; that failure says nothing about the candidate
+		// itself, so keep it cached. Otherwise remove the stale association.
+		if len(mountCandidate.SourceRepository) > 0 &&
+			!(isUnauthorizedError && !pd.pushState.hasAuthInfo) &&
+			(metadata.CheckV2MetadataHMAC(&mountCandidate, pd.hmacKey) ||
+				len(mountCandidate.HMAC) == 0) {
+			cause := "blob mount failure"
+			if err != nil {
+				cause = fmt.Sprintf("an error: %v", err.Error())
+			}
+			logrus.Debugf("removing association between layer %s and %s due to %s", mountCandidate.Digest, mountCandidate.SourceRepository, cause)
+			pd.v2MetadataService.Remove(mountCandidate)
+		}
+
+		if lu != nil {
+			// cancel previous upload
+			cancelLayerUpload(ctx, mountCandidate.Digest, layerUpload)
+			layerUpload = lu
+		}
+	}
+
+
if maxExistenceChecks-len(pd.checkedDigests) > 0 { + // do additional layer existence checks with other known digests if any + descriptor, exists, err := pd.layerAlreadyExists(ctx, progressOutput, diffID, checkOtherRepositories, maxExistenceChecks-len(pd.checkedDigests), v2Metadata) + if exists || err != nil { + return descriptor, err + } + } + + logrus.Debugf("Pushing layer: %s", diffID) + if layerUpload == nil { + layerUpload, err = bs.Create(ctx) + if err != nil { + return distribution.Descriptor{}, retryOnError(err) + } + } + defer layerUpload.Close() + // upload the blob + return pd.uploadUsingSession(ctx, progressOutput, diffID, layerUpload) +} + +func (pd *v2PushDescriptor) SetRemoteDescriptor(descriptor distribution.Descriptor) { + pd.remoteDescriptor = descriptor +} + +func (pd *v2PushDescriptor) Descriptor() distribution.Descriptor { + return pd.remoteDescriptor +} + +func (pd *v2PushDescriptor) uploadUsingSession( + ctx context.Context, + progressOutput progress.Output, + diffID layer.DiffID, + layerUpload distribution.BlobWriter, +) (distribution.Descriptor, error) { + var reader io.ReadCloser + + contentReader, err := pd.layer.Open() + if err != nil { + return distribution.Descriptor{}, retryOnError(err) + } + + size, _ := pd.layer.Size() + + reader = progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, contentReader), progressOutput, size, pd.ID(), "Pushing") + + switch m := pd.layer.MediaType(); m { + case schema2.MediaTypeUncompressedLayer: + compressedReader, compressionDone := compress(reader) + defer func(closer io.Closer) { + closer.Close() + <-compressionDone + }(reader) + reader = compressedReader + case schema2.MediaTypeLayer: + default: + reader.Close() + return distribution.Descriptor{}, fmt.Errorf("unsupported layer media type %s", m) + } + + digester := digest.Canonical.Digester() + tee := io.TeeReader(reader, digester.Hash()) + + nn, err := layerUpload.ReadFrom(tee) + reader.Close() + if err != nil { + return distribution.Descriptor{}, retryOnError(err) + } + + pushDigest := digester.Digest() + if _, err := layerUpload.Commit(ctx, distribution.Descriptor{Digest: pushDigest}); err != nil { + return distribution.Descriptor{}, retryOnError(err) + } + + logrus.Debugf("uploaded layer %s (%s), %d bytes", diffID, pushDigest, nn) + progress.Update(progressOutput, pd.ID(), "Pushed") + + // Cache mapping from this layer's DiffID to the blobsum + if err := pd.v2MetadataService.TagAndAdd(diffID, pd.hmacKey, metadata.V2Metadata{ + Digest: pushDigest, + SourceRepository: pd.repoInfo.Name(), + }); err != nil { + return distribution.Descriptor{}, xfer.DoNotRetry{Err: err} + } + + desc := distribution.Descriptor{ + Digest: pushDigest, + MediaType: schema2.MediaTypeLayer, + Size: nn, + } + + pd.pushState.Lock() + // If Commit succeeded, that's an indication that the remote registry speaks the v2 protocol. + pd.pushState.confirmedV2 = true + pd.pushState.remoteLayers[diffID] = desc + pd.pushState.Unlock() + + return desc, nil +} + +// layerAlreadyExists checks if the registry already knows about any of the metadata passed in the "metadata" +// slice. If it finds one that the registry knows about, it returns the known digest and "true". If +// "checkOtherRepositories" is true, stat will be performed also with digests mapped to any other repository +// (not just the target one). 
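+//
+// Upload above calls it first with a single attempt against known metadata,
+// and later with whatever budget getMaxMountAndExistenceCheckAttempts left
+// over, e.g.:
+//
+//	descriptor, exists, err := pd.layerAlreadyExists(ctx, progressOutput, diffID, true, 1, v2Metadata)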
+func (pd *v2PushDescriptor) layerAlreadyExists( + ctx context.Context, + progressOutput progress.Output, + diffID layer.DiffID, + checkOtherRepositories bool, + maxExistenceCheckAttempts int, + v2Metadata []metadata.V2Metadata, +) (desc distribution.Descriptor, exists bool, err error) { + // filter the metadata + candidates := []metadata.V2Metadata{} + for _, meta := range v2Metadata { + if len(meta.SourceRepository) > 0 && !checkOtherRepositories && meta.SourceRepository != pd.repoInfo.Name() { + continue + } + candidates = append(candidates, meta) + } + // sort the candidates by similarity + sortV2MetadataByLikenessAndAge(pd.repoInfo, pd.hmacKey, candidates) + + digestToMetadata := make(map[digest.Digest]*metadata.V2Metadata) + // an array of unique blob digests ordered from the best mount candidates to worst + layerDigests := []digest.Digest{} + for i := 0; i < len(candidates); i++ { + if len(layerDigests) >= maxExistenceCheckAttempts { + break + } + meta := &candidates[i] + if _, exists := digestToMetadata[meta.Digest]; exists { + // keep reference just to the first mapping (the best mount candidate) + continue + } + if _, exists := pd.checkedDigests[meta.Digest]; exists { + // existence of this digest has already been tested + continue + } + digestToMetadata[meta.Digest] = meta + layerDigests = append(layerDigests, meta.Digest) + } + +attempts: + for _, dgst := range layerDigests { + meta := digestToMetadata[dgst] + logrus.Debugf("Checking for presence of layer %s (%s) in %s", diffID, dgst, pd.repoInfo.Name()) + desc, err = pd.repo.Blobs(ctx).Stat(ctx, dgst) + pd.checkedDigests[meta.Digest] = struct{}{} + switch err { + case nil: + if m, ok := digestToMetadata[desc.Digest]; !ok || m.SourceRepository != pd.repoInfo.Name() || !metadata.CheckV2MetadataHMAC(m, pd.hmacKey) { + // cache mapping from this layer's DiffID to the blobsum + if err := pd.v2MetadataService.TagAndAdd(diffID, pd.hmacKey, metadata.V2Metadata{ + Digest: desc.Digest, + SourceRepository: pd.repoInfo.Name(), + }); err != nil { + return distribution.Descriptor{}, false, xfer.DoNotRetry{Err: err} + } + } + desc.MediaType = schema2.MediaTypeLayer + exists = true + break attempts + case distribution.ErrBlobUnknown: + if meta.SourceRepository == pd.repoInfo.Name() { + // remove the mapping to the target repository + pd.v2MetadataService.Remove(*meta) + } + default: + logrus.WithError(err).Debugf("Failed to check for presence of layer %s (%s) in %s", diffID, dgst, pd.repoInfo.Name()) + } + } + + if exists { + progress.Update(progressOutput, pd.ID(), "Layer already exists") + pd.pushState.Lock() + pd.pushState.remoteLayers[diffID] = desc + pd.pushState.Unlock() + } + + return desc, exists, nil +} + +// getMaxMountAndExistenceCheckAttempts returns a maximum number of cross repository mount attempts from +// source repositories of target registry, maximum number of layer existence checks performed on the target +// repository and whether the check shall be done also with digests mapped to different repositories. The +// decision is based on layer size. The smaller the layer, the fewer attempts shall be made because the cost +// of upload does not outweigh a latency. 
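+//
+// Concretely (per the switch below): large blobs get up to 4 mount attempts
+// and 3 existence checks against any repository, medium-sized ones 3 and 1
+// against the target repository only, and small ones 1 and 1.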
+func getMaxMountAndExistenceCheckAttempts(layer PushLayer) (maxMountAttempts, maxExistenceCheckAttempts int, checkOtherRepositories bool) {
+	size, err := layer.Size()
+	switch {
+	// big blob
+	case size > middleLayerMaximumSize:
+		// first, attempt to mount the blob a few times;
+		// then do a few existence checks with digests associated with any repository;
+		// finally, fall back to uploading the blob
+		return 4, 3, true
+
+	// medium-sized blobs; if we could not determine the size, assume a medium-sized blob
+	case size > smallLayerMaximumSize, err != nil:
+		// first, attempt to mount blobs of average size a few times;
+		// then try at most one existence check, if there is an existing mapping to the target repository;
+		// finally, fall back to uploading the blob
+		return 3, 1, false
+
+	// small blobs: do a minimum number of checks
+	default:
+		return 1, 1, false
+	}
+}
+
+// getRepositoryMountCandidates returns an array of v2 metadata items belonging to the given registry. The
+// array is sorted from youngest to oldest. Only entries whose SourceRepository lies under the same registry
+// domain as repoInfo are considered, and the target repository itself is excluded.
+func getRepositoryMountCandidates(
+	repoInfo reference.Named,
+	hmacKey []byte,
+	max int,
+	v2Metadata []metadata.V2Metadata,
+) []metadata.V2Metadata {
+	candidates := []metadata.V2Metadata{}
+	for _, meta := range v2Metadata {
+		sourceRepo, err := reference.ParseNamed(meta.SourceRepository)
+		if err != nil || reference.Domain(repoInfo) != reference.Domain(sourceRepo) {
+			continue
+		}
+		// the target repository is not a viable candidate
+		if meta.SourceRepository == repoInfo.Name() {
+			continue
+		}
+		candidates = append(candidates, meta)
+	}
+
+	sortV2MetadataByLikenessAndAge(repoInfo, hmacKey, candidates)
+	if max >= 0 && len(candidates) > max {
+		// select the youngest metadata
+		candidates = candidates[:max]
+	}
+
+	return candidates
+}
+
+// byLikeness is a sorting container for v2 metadata candidates for cross repository mount. The
+// candidate "a" is preferred over "b":
+//
+// 1. if it was hashed using the same AuthConfig as the one used to authenticate to the target repository
+// and "b" was not
+// 2.
if a number of its repository path components exactly matching path components of target repository is higher +type byLikeness struct { + arr []metadata.V2Metadata + hmacKey []byte + pathComponents []string +} + +func (bla byLikeness) Less(i, j int) bool { + aMacMatch := metadata.CheckV2MetadataHMAC(&bla.arr[i], bla.hmacKey) + bMacMatch := metadata.CheckV2MetadataHMAC(&bla.arr[j], bla.hmacKey) + if aMacMatch != bMacMatch { + return aMacMatch + } + aMatch := numOfMatchingPathComponents(bla.arr[i].SourceRepository, bla.pathComponents) + bMatch := numOfMatchingPathComponents(bla.arr[j].SourceRepository, bla.pathComponents) + return aMatch > bMatch +} +func (bla byLikeness) Swap(i, j int) { + bla.arr[i], bla.arr[j] = bla.arr[j], bla.arr[i] +} +func (bla byLikeness) Len() int { return len(bla.arr) } + +func sortV2MetadataByLikenessAndAge(repoInfo reference.Named, hmacKey []byte, marr []metadata.V2Metadata) { + // reverse the metadata array to shift the newest entries to the beginning + for i := 0; i < len(marr)/2; i++ { + marr[i], marr[len(marr)-i-1] = marr[len(marr)-i-1], marr[i] + } + // keep equal entries ordered from the youngest to the oldest + sort.Stable(byLikeness{ + arr: marr, + hmacKey: hmacKey, + pathComponents: getPathComponents(repoInfo.Name()), + }) +} + +// numOfMatchingPathComponents returns a number of path components in "pth" that exactly match "matchComponents". +func numOfMatchingPathComponents(pth string, matchComponents []string) int { + pthComponents := getPathComponents(pth) + i := 0 + for ; i < len(pthComponents) && i < len(matchComponents); i++ { + if matchComponents[i] != pthComponents[i] { + return i + } + } + return i +} + +func getPathComponents(path string) []string { + return strings.Split(path, "/") +} + +func cancelLayerUpload(ctx context.Context, dgst digest.Digest, layerUpload distribution.BlobWriter) { + if layerUpload != nil { + logrus.Debugf("cancelling upload of blob %s", dgst) + err := layerUpload.Cancel(ctx) + if err != nil { + logrus.Warnf("failed to cancel upload: %v", err) + } + } +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/distribution/registry.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/distribution/registry.go new file mode 100644 index 000000000..d81530b75 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/distribution/registry.go @@ -0,0 +1,158 @@ +package distribution // import "github.com/docker/docker/distribution" + +import ( + "context" + "fmt" + "net" + "net/http" + "time" + + "github.com/docker/distribution" + "github.com/docker/distribution/manifest/schema2" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/client" + "github.com/docker/distribution/registry/client/auth" + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/docker/api/types" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/registry" + "github.com/docker/go-connections/sockets" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// ImageTypes represents the schema2 config types for images +var ImageTypes = []string{ + schema2.MediaTypeImageConfig, + ocispec.MediaTypeImageConfig, + // Handle unexpected values from https://github.com/docker/distribution/issues/1621 + // (see also https://github.com/docker/docker/issues/22378, + // https://github.com/docker/docker/issues/30083) + "application/octet-stream", + "application/json", + "text/html", + // Treat defaulted values as images, newer types cannot be implied + "", +} + +// 
PluginTypes represents the schema2 config types for plugins +var PluginTypes = []string{ + schema2.MediaTypePluginConfig, +} + +var mediaTypeClasses map[string]string + +func init() { + // initialize media type classes with all know types for + // plugin + mediaTypeClasses = map[string]string{} + for _, t := range ImageTypes { + mediaTypeClasses[t] = "image" + } + for _, t := range PluginTypes { + mediaTypeClasses[t] = "plugin" + } +} + +// NewV2Repository returns a repository (v2 only). It creates an HTTP transport +// providing timeout settings and authentication support, and also verifies the +// remote API version. +func NewV2Repository(ctx context.Context, repoInfo *registry.RepositoryInfo, endpoint registry.APIEndpoint, metaHeaders http.Header, authConfig *types.AuthConfig, actions ...string) (repo distribution.Repository, foundVersion bool, err error) { + repoName := repoInfo.Name.Name() + // If endpoint does not support CanonicalName, use the RemoteName instead + if endpoint.TrimHostname { + repoName = reference.Path(repoInfo.Name) + } + + direct := &net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + } + + // TODO(dmcgowan): Call close idle connections when complete, use keep alive + base := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: direct.Dial, + TLSHandshakeTimeout: 10 * time.Second, + TLSClientConfig: endpoint.TLSConfig, + // TODO(dmcgowan): Call close idle connections when complete and use keep alive + DisableKeepAlives: true, + } + + proxyDialer, err := sockets.DialerFromEnvironment(direct) + if err == nil { + base.Dial = proxyDialer.Dial + } + + modifiers := registry.Headers(dockerversion.DockerUserAgent(ctx), metaHeaders) + authTransport := transport.NewTransport(base, modifiers...) + + challengeManager, foundVersion, err := registry.PingV2Registry(endpoint.URL, authTransport) + if err != nil { + transportOK := false + if responseErr, ok := err.(registry.PingResponseError); ok { + transportOK = true + err = responseErr.Err + } + return nil, foundVersion, fallbackError{ + err: err, + confirmedV2: foundVersion, + transportOK: transportOK, + } + } + + if authConfig.RegistryToken != "" { + passThruTokenHandler := &existingTokenHandler{token: authConfig.RegistryToken} + modifiers = append(modifiers, auth.NewAuthorizer(challengeManager, passThruTokenHandler)) + } else { + scope := auth.RepositoryScope{ + Repository: repoName, + Actions: actions, + Class: repoInfo.Class, + } + + creds := registry.NewStaticCredentialStore(authConfig) + tokenHandlerOptions := auth.TokenHandlerOptions{ + Transport: authTransport, + Credentials: creds, + Scopes: []auth.Scope{scope}, + ClientID: registry.AuthClientID, + } + tokenHandler := auth.NewTokenHandlerWithOptions(tokenHandlerOptions) + basicHandler := auth.NewBasicHandler(creds) + modifiers = append(modifiers, auth.NewAuthorizer(challengeManager, tokenHandler, basicHandler)) + } + tr := transport.NewTransport(base, modifiers...) 
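+	// tr combines the base HTTP transport with the authorization modifiers
+	// assembled above; the repository client below uses it for all registry
+	// requests.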
+ + repoNameRef, err := reference.WithName(repoName) + if err != nil { + return nil, foundVersion, fallbackError{ + err: err, + confirmedV2: foundVersion, + transportOK: true, + } + } + + repo, err = client.NewRepository(repoNameRef, endpoint.URL.String(), tr) + if err != nil { + err = fallbackError{ + err: err, + confirmedV2: foundVersion, + transportOK: true, + } + } + return +} + +type existingTokenHandler struct { + token string +} + +func (th *existingTokenHandler) Scheme() string { + return "bearer" +} + +func (th *existingTokenHandler) AuthorizeRequest(req *http.Request, params map[string]string) error { + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", th.token)) + return nil +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/distribution/xfer/download.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/distribution/xfer/download.go new file mode 100644 index 000000000..e8cda9362 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/distribution/xfer/download.go @@ -0,0 +1,474 @@ +package xfer // import "github.com/docker/docker/distribution/xfer" + +import ( + "context" + "errors" + "fmt" + "io" + "runtime" + "time" + + "github.com/docker/distribution" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/system" + "github.com/sirupsen/logrus" +) + +const maxDownloadAttempts = 5 + +// LayerDownloadManager figures out which layers need to be downloaded, then +// registers and downloads those, taking into account dependencies between +// layers. +type LayerDownloadManager struct { + layerStores map[string]layer.Store + tm TransferManager + waitDuration time.Duration +} + +// SetConcurrency sets the max concurrent downloads for each pull +func (ldm *LayerDownloadManager) SetConcurrency(concurrency int) { + ldm.tm.SetConcurrency(concurrency) +} + +// NewLayerDownloadManager returns a new LayerDownloadManager. +func NewLayerDownloadManager(layerStores map[string]layer.Store, concurrencyLimit int, options ...func(*LayerDownloadManager)) *LayerDownloadManager { + manager := LayerDownloadManager{ + layerStores: layerStores, + tm: NewTransferManager(concurrencyLimit), + waitDuration: time.Second, + } + for _, option := range options { + option(&manager) + } + return &manager +} + +type downloadTransfer struct { + Transfer + + layerStore layer.Store + layer layer.Layer + err error +} + +// result returns the layer resulting from the download, if the download +// and registration were successful. +func (d *downloadTransfer) result() (layer.Layer, error) { + return d.layer, d.err +} + +// A DownloadDescriptor references a layer that may need to be downloaded. +type DownloadDescriptor interface { + // Key returns the key used to deduplicate downloads. + Key() string + // ID returns the ID for display purposes. + ID() string + // DiffID should return the DiffID for this layer, or an error + // if it is unknown (for example, if it has not been downloaded + // before). + DiffID() (layer.DiffID, error) + // Download is called to perform the download. + Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error) + // Close is called when the download manager is finished with this + // descriptor and will not call Download again or read from the reader + // that Download returned. 
+ Close() +} + +// DownloadDescriptorWithRegistered is a DownloadDescriptor that has an +// additional Registered method which gets called after a downloaded layer is +// registered. This allows the user of the download manager to know the DiffID +// of each registered layer. This method is called if a cast to +// DownloadDescriptorWithRegistered is successful. +type DownloadDescriptorWithRegistered interface { + DownloadDescriptor + Registered(diffID layer.DiffID) +} + +// Download is a blocking function which ensures the requested layers are +// present in the layer store. It uses the string returned by the Key method to +// deduplicate downloads. If a given layer is not already known to present in +// the layer store, and the key is not used by an in-progress download, the +// Download method is called to get the layer tar data. Layers are then +// registered in the appropriate order. The caller must call the returned +// release function once it is done with the returned RootFS object. +func (ldm *LayerDownloadManager) Download(ctx context.Context, initialRootFS image.RootFS, os string, layers []DownloadDescriptor, progressOutput progress.Output) (image.RootFS, func(), error) { + var ( + topLayer layer.Layer + topDownload *downloadTransfer + watcher *Watcher + missingLayer bool + transferKey = "" + downloadsByKey = make(map[string]*downloadTransfer) + ) + + // Assume that the operating system is the host OS if blank, and validate it + // to ensure we don't cause a panic by an invalid index into the layerstores. + if os == "" { + os = runtime.GOOS + } + if !system.IsOSSupported(os) { + return image.RootFS{}, nil, system.ErrNotSupportedOperatingSystem + } + + rootFS := initialRootFS + for _, descriptor := range layers { + key := descriptor.Key() + transferKey += key + + if !missingLayer { + missingLayer = true + diffID, err := descriptor.DiffID() + if err == nil { + getRootFS := rootFS + getRootFS.Append(diffID) + l, err := ldm.layerStores[os].Get(getRootFS.ChainID()) + if err == nil { + // Layer already exists. + logrus.Debugf("Layer already exists: %s", descriptor.ID()) + progress.Update(progressOutput, descriptor.ID(), "Already exists") + if topLayer != nil { + layer.ReleaseAndLog(ldm.layerStores[os], topLayer) + } + topLayer = l + missingLayer = false + rootFS.Append(diffID) + // Register this repository as a source of this layer. + withRegistered, hasRegistered := descriptor.(DownloadDescriptorWithRegistered) + if hasRegistered { // As layerstore may set the driver + withRegistered.Registered(diffID) + } + continue + } + } + } + + // Does this layer have the same data as a previous layer in + // the stack? If so, avoid downloading it more than once. + var topDownloadUncasted Transfer + if existingDownload, ok := downloadsByKey[key]; ok { + xferFunc := ldm.makeDownloadFuncFromDownload(descriptor, existingDownload, topDownload, os) + defer topDownload.Transfer.Release(watcher) + topDownloadUncasted, watcher = ldm.tm.Transfer(transferKey, xferFunc, progressOutput) + topDownload = topDownloadUncasted.(*downloadTransfer) + continue + } + + // Layer is not known to exist - download and register it. 
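+		// (Each transfer is chained to the previous topDownload, so even
+		// though downloads run concurrently, layers are registered strictly
+		// in rootfs order.)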
+ progress.Update(progressOutput, descriptor.ID(), "Pulling fs layer") + + var xferFunc DoFunc + if topDownload != nil { + xferFunc = ldm.makeDownloadFunc(descriptor, "", topDownload, os) + defer topDownload.Transfer.Release(watcher) + } else { + xferFunc = ldm.makeDownloadFunc(descriptor, rootFS.ChainID(), nil, os) + } + topDownloadUncasted, watcher = ldm.tm.Transfer(transferKey, xferFunc, progressOutput) + topDownload = topDownloadUncasted.(*downloadTransfer) + downloadsByKey[key] = topDownload + } + + if topDownload == nil { + return rootFS, func() { + if topLayer != nil { + layer.ReleaseAndLog(ldm.layerStores[os], topLayer) + } + }, nil + } + + // Won't be using the list built up so far - will generate it + // from downloaded layers instead. + rootFS.DiffIDs = []layer.DiffID{} + + defer func() { + if topLayer != nil { + layer.ReleaseAndLog(ldm.layerStores[os], topLayer) + } + }() + + select { + case <-ctx.Done(): + topDownload.Transfer.Release(watcher) + return rootFS, func() {}, ctx.Err() + case <-topDownload.Done(): + break + } + + l, err := topDownload.result() + if err != nil { + topDownload.Transfer.Release(watcher) + return rootFS, func() {}, err + } + + // Must do this exactly len(layers) times, so we don't include the + // base layer on Windows. + for range layers { + if l == nil { + topDownload.Transfer.Release(watcher) + return rootFS, func() {}, errors.New("internal error: too few parent layers") + } + rootFS.DiffIDs = append([]layer.DiffID{l.DiffID()}, rootFS.DiffIDs...) + l = l.Parent() + } + return rootFS, func() { topDownload.Transfer.Release(watcher) }, err +} + +// makeDownloadFunc returns a function that performs the layer download and +// registration. If parentDownload is non-nil, it waits for that download to +// complete before the registration step, and registers the downloaded data +// on top of parentDownload's resulting layer. Otherwise, it registers the +// layer on top of the ChainID given by parentLayer. +func (ldm *LayerDownloadManager) makeDownloadFunc(descriptor DownloadDescriptor, parentLayer layer.ChainID, parentDownload *downloadTransfer, os string) DoFunc { + return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { + d := &downloadTransfer{ + Transfer: NewTransfer(), + layerStore: ldm.layerStores[os], + } + + go func() { + defer func() { + close(progressChan) + }() + + progressOutput := progress.ChanOutput(progressChan) + + select { + case <-start: + default: + progress.Update(progressOutput, descriptor.ID(), "Waiting") + <-start + } + + if parentDownload != nil { + // Did the parent download already fail or get + // cancelled? + select { + case <-parentDownload.Done(): + _, err := parentDownload.result() + if err != nil { + d.err = err + return + } + default: + } + } + + var ( + downloadReader io.ReadCloser + size int64 + err error + retries int + ) + + defer descriptor.Close() + + for { + downloadReader, size, err = descriptor.Download(d.Transfer.Context(), progressOutput) + if err == nil { + break + } + + // If an error was returned because the context + // was cancelled, we shouldn't retry. 
+ select { + case <-d.Transfer.Context().Done(): + d.err = err + return + default: + } + + retries++ + if _, isDNR := err.(DoNotRetry); isDNR || retries == maxDownloadAttempts { + logrus.Errorf("Download failed: %v", err) + d.err = err + return + } + + logrus.Errorf("Download failed, retrying: %v", err) + delay := retries * 5 + ticker := time.NewTicker(ldm.waitDuration) + + selectLoop: + for { + progress.Updatef(progressOutput, descriptor.ID(), "Retrying in %d second%s", delay, (map[bool]string{true: "s"})[delay != 1]) + select { + case <-ticker.C: + delay-- + if delay == 0 { + ticker.Stop() + break selectLoop + } + case <-d.Transfer.Context().Done(): + ticker.Stop() + d.err = errors.New("download cancelled during retry delay") + return + } + + } + } + + close(inactive) + + if parentDownload != nil { + select { + case <-d.Transfer.Context().Done(): + d.err = errors.New("layer registration cancelled") + downloadReader.Close() + return + case <-parentDownload.Done(): + } + + l, err := parentDownload.result() + if err != nil { + d.err = err + downloadReader.Close() + return + } + parentLayer = l.ChainID() + } + + reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(d.Transfer.Context(), downloadReader), progressOutput, size, descriptor.ID(), "Extracting") + defer reader.Close() + + inflatedLayerData, err := archive.DecompressStream(reader) + if err != nil { + d.err = fmt.Errorf("could not get decompression stream: %v", err) + return + } + + var src distribution.Descriptor + if fs, ok := descriptor.(distribution.Describable); ok { + src = fs.Descriptor() + } + if ds, ok := d.layerStore.(layer.DescribableStore); ok { + d.layer, err = ds.RegisterWithDescriptor(inflatedLayerData, parentLayer, src) + } else { + d.layer, err = d.layerStore.Register(inflatedLayerData, parentLayer) + } + if err != nil { + select { + case <-d.Transfer.Context().Done(): + d.err = errors.New("layer registration cancelled") + default: + d.err = fmt.Errorf("failed to register layer: %v", err) + } + return + } + + progress.Update(progressOutput, descriptor.ID(), "Pull complete") + withRegistered, hasRegistered := descriptor.(DownloadDescriptorWithRegistered) + if hasRegistered { + withRegistered.Registered(d.layer.DiffID()) + } + + // Doesn't actually need to be its own goroutine, but + // done like this so we can defer close(c). + go func() { + <-d.Transfer.Released() + if d.layer != nil { + layer.ReleaseAndLog(d.layerStore, d.layer) + } + }() + }() + + return d + } +} + +// makeDownloadFuncFromDownload returns a function that performs the layer +// registration when the layer data is coming from an existing download. It +// waits for sourceDownload and parentDownload to complete, and then +// reregisters the data from sourceDownload's top layer on top of +// parentDownload. This function does not log progress output because it would +// interfere with the progress reporting for sourceDownload, which has the same +// Key. 
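+// In effect, when the same layer occurs more than once in an image, its data
+// is downloaded only once (by sourceDownload) and merely re-registered here
+// on top of a different parent chain.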
+func (ldm *LayerDownloadManager) makeDownloadFuncFromDownload(descriptor DownloadDescriptor, sourceDownload *downloadTransfer, parentDownload *downloadTransfer, os string) DoFunc { + return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { + d := &downloadTransfer{ + Transfer: NewTransfer(), + layerStore: ldm.layerStores[os], + } + + go func() { + defer func() { + close(progressChan) + }() + + <-start + + close(inactive) + + select { + case <-d.Transfer.Context().Done(): + d.err = errors.New("layer registration cancelled") + return + case <-parentDownload.Done(): + } + + l, err := parentDownload.result() + if err != nil { + d.err = err + return + } + parentLayer := l.ChainID() + + // sourceDownload should have already finished if + // parentDownload finished, but wait for it explicitly + // to be sure. + select { + case <-d.Transfer.Context().Done(): + d.err = errors.New("layer registration cancelled") + return + case <-sourceDownload.Done(): + } + + l, err = sourceDownload.result() + if err != nil { + d.err = err + return + } + + layerReader, err := l.TarStream() + if err != nil { + d.err = err + return + } + defer layerReader.Close() + + var src distribution.Descriptor + if fs, ok := l.(distribution.Describable); ok { + src = fs.Descriptor() + } + if ds, ok := d.layerStore.(layer.DescribableStore); ok { + d.layer, err = ds.RegisterWithDescriptor(layerReader, parentLayer, src) + } else { + d.layer, err = d.layerStore.Register(layerReader, parentLayer) + } + if err != nil { + d.err = fmt.Errorf("failed to register layer: %v", err) + return + } + + withRegistered, hasRegistered := descriptor.(DownloadDescriptorWithRegistered) + if hasRegistered { + withRegistered.Registered(d.layer.DiffID()) + } + + // Doesn't actually need to be its own goroutine, but + // done like this so we can defer close(c). + go func() { + <-d.Transfer.Released() + if d.layer != nil { + layer.ReleaseAndLog(d.layerStore, d.layer) + } + }() + }() + + return d + } +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/distribution/xfer/transfer.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/distribution/xfer/transfer.go new file mode 100644 index 000000000..c356fde8d --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/distribution/xfer/transfer.go @@ -0,0 +1,401 @@ +package xfer // import "github.com/docker/docker/distribution/xfer" + +import ( + "context" + "runtime" + "sync" + + "github.com/docker/docker/pkg/progress" +) + +// DoNotRetry is an error wrapper indicating that the error cannot be resolved +// with a retry. +type DoNotRetry struct { + Err error +} + +// Error returns the stringified representation of the encapsulated error. +func (e DoNotRetry) Error() string { + return e.Err.Error() +} + +// Watcher is returned by Watch and can be passed to Release to stop watching. +type Watcher struct { + // signalChan is used to signal to the watcher goroutine that + // new progress information is available, or that the transfer + // has finished. + signalChan chan struct{} + // releaseChan signals to the watcher goroutine that the watcher + // should be detached. + releaseChan chan struct{} + // running remains open as long as the watcher is watching the + // transfer. It gets closed if the transfer finishes or the + // watcher is detached. + running chan struct{} +} + +// Transfer represents an in-progress transfer. 
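+//
+// An illustrative watch/release cycle (a sketch only; t is a Transfer and
+// progressOutput a progress.Output):
+//
+//	w := t.Watch(progressOutput)
+//	<-t.Done()
+//	t.Release(w)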
+type Transfer interface { + Watch(progressOutput progress.Output) *Watcher + Release(*Watcher) + Context() context.Context + Close() + Done() <-chan struct{} + Released() <-chan struct{} + Broadcast(masterProgressChan <-chan progress.Progress) +} + +type transfer struct { + mu sync.Mutex + + ctx context.Context + cancel context.CancelFunc + + // watchers keeps track of the goroutines monitoring progress output, + // indexed by the channels that release them. + watchers map[chan struct{}]*Watcher + + // lastProgress is the most recently received progress event. + lastProgress progress.Progress + // hasLastProgress is true when lastProgress has been set. + hasLastProgress bool + + // running remains open as long as the transfer is in progress. + running chan struct{} + // released stays open until all watchers release the transfer and + // the transfer is no longer tracked by the transfer manager. + released chan struct{} + + // broadcastDone is true if the master progress channel has closed. + broadcastDone bool + // closed is true if Close has been called + closed bool + // broadcastSyncChan allows watchers to "ping" the broadcasting + // goroutine to wait for it for deplete its input channel. This ensures + // a detaching watcher won't miss an event that was sent before it + // started detaching. + broadcastSyncChan chan struct{} +} + +// NewTransfer creates a new transfer. +func NewTransfer() Transfer { + t := &transfer{ + watchers: make(map[chan struct{}]*Watcher), + running: make(chan struct{}), + released: make(chan struct{}), + broadcastSyncChan: make(chan struct{}), + } + + // This uses context.Background instead of a caller-supplied context + // so that a transfer won't be cancelled automatically if the client + // which requested it is ^C'd (there could be other viewers). + t.ctx, t.cancel = context.WithCancel(context.Background()) + + return t +} + +// Broadcast copies the progress and error output to all viewers. +func (t *transfer) Broadcast(masterProgressChan <-chan progress.Progress) { + for { + var ( + p progress.Progress + ok bool + ) + select { + case p, ok = <-masterProgressChan: + default: + // We've depleted the channel, so now we can handle + // reads on broadcastSyncChan to let detaching watchers + // know we're caught up. + select { + case <-t.broadcastSyncChan: + continue + case p, ok = <-masterProgressChan: + } + } + + t.mu.Lock() + if ok { + t.lastProgress = p + t.hasLastProgress = true + for _, w := range t.watchers { + select { + case w.signalChan <- struct{}{}: + default: + } + } + } else { + t.broadcastDone = true + } + t.mu.Unlock() + if !ok { + close(t.running) + return + } + } +} + +// Watch adds a watcher to the transfer. The supplied channel gets progress +// updates and is closed when the transfer finishes. +func (t *transfer) Watch(progressOutput progress.Output) *Watcher { + t.mu.Lock() + defer t.mu.Unlock() + + w := &Watcher{ + releaseChan: make(chan struct{}), + signalChan: make(chan struct{}), + running: make(chan struct{}), + } + + t.watchers[w.releaseChan] = w + + if t.broadcastDone { + close(w.running) + return w + } + + go func() { + defer func() { + close(w.running) + }() + var ( + done bool + lastWritten progress.Progress + hasLastWritten bool + ) + for { + t.mu.Lock() + hasLastProgress := t.hasLastProgress + lastProgress := t.lastProgress + t.mu.Unlock() + + // Make sure we don't write the last progress item + // twice. 
+ if hasLastProgress && (!done || !hasLastWritten || lastProgress != lastWritten) { + progressOutput.WriteProgress(lastProgress) + lastWritten = lastProgress + hasLastWritten = true + } + + if done { + return + } + + select { + case <-w.signalChan: + case <-w.releaseChan: + done = true + // Since the watcher is going to detach, make + // sure the broadcaster is caught up so we + // don't miss anything. + select { + case t.broadcastSyncChan <- struct{}{}: + case <-t.running: + } + case <-t.running: + done = true + } + } + }() + + return w +} + +// Release is the inverse of Watch; indicating that the watcher no longer wants +// to be notified about the progress of the transfer. All calls to Watch must +// be paired with later calls to Release so that the lifecycle of the transfer +// is properly managed. +func (t *transfer) Release(watcher *Watcher) { + t.mu.Lock() + delete(t.watchers, watcher.releaseChan) + + if len(t.watchers) == 0 { + if t.closed { + // released may have been closed already if all + // watchers were released, then another one was added + // while waiting for a previous watcher goroutine to + // finish. + select { + case <-t.released: + default: + close(t.released) + } + } else { + t.cancel() + } + } + t.mu.Unlock() + + close(watcher.releaseChan) + // Block until the watcher goroutine completes + <-watcher.running +} + +// Done returns a channel which is closed if the transfer completes or is +// cancelled. Note that having 0 watchers causes a transfer to be cancelled. +func (t *transfer) Done() <-chan struct{} { + // Note that this doesn't return t.ctx.Done() because that channel will + // be closed the moment Cancel is called, and we need to return a + // channel that blocks until a cancellation is actually acknowledged by + // the transfer function. + return t.running +} + +// Released returns a channel which is closed once all watchers release the +// transfer AND the transfer is no longer tracked by the transfer manager. +func (t *transfer) Released() <-chan struct{} { + return t.released +} + +// Context returns the context associated with the transfer. +func (t *transfer) Context() context.Context { + return t.ctx +} + +// Close is called by the transfer manager when the transfer is no longer +// being tracked. +func (t *transfer) Close() { + t.mu.Lock() + t.closed = true + if len(t.watchers) == 0 { + close(t.released) + } + t.mu.Unlock() +} + +// DoFunc is a function called by the transfer manager to actually perform +// a transfer. It should be non-blocking. It should wait until the start channel +// is closed before transferring any data. If the function closes inactive, that +// signals to the transfer manager that the job is no longer actively moving +// data - for example, it may be waiting for a dependent transfer to finish. +// This prevents it from taking up a slot. +type DoFunc func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer + +// TransferManager is used by LayerDownloadManager and LayerUploadManager to +// schedule and deduplicate transfers. It is up to the TransferManager +// implementation to make the scheduling and concurrency decisions. +type TransferManager interface { + // Transfer checks if a transfer with the given key is in progress. If + // so, it returns progress and error output from that transfer. + // Otherwise, it will call xferFunc to initiate the transfer. 
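+	// The returned Watcher must eventually be passed back to the Transfer's
+	// Release method; a transfer with zero watchers is cancelled.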
+ Transfer(key string, xferFunc DoFunc, progressOutput progress.Output) (Transfer, *Watcher) + // SetConcurrency set the concurrencyLimit so that it is adjustable daemon reload + SetConcurrency(concurrency int) +} + +type transferManager struct { + mu sync.Mutex + + concurrencyLimit int + activeTransfers int + transfers map[string]Transfer + waitingTransfers []chan struct{} +} + +// NewTransferManager returns a new TransferManager. +func NewTransferManager(concurrencyLimit int) TransferManager { + return &transferManager{ + concurrencyLimit: concurrencyLimit, + transfers: make(map[string]Transfer), + } +} + +// SetConcurrency sets the concurrencyLimit +func (tm *transferManager) SetConcurrency(concurrency int) { + tm.mu.Lock() + tm.concurrencyLimit = concurrency + tm.mu.Unlock() +} + +// Transfer checks if a transfer matching the given key is in progress. If not, +// it starts one by calling xferFunc. The caller supplies a channel which +// receives progress output from the transfer. +func (tm *transferManager) Transfer(key string, xferFunc DoFunc, progressOutput progress.Output) (Transfer, *Watcher) { + tm.mu.Lock() + defer tm.mu.Unlock() + + for { + xfer, present := tm.transfers[key] + if !present { + break + } + // Transfer is already in progress. + watcher := xfer.Watch(progressOutput) + + select { + case <-xfer.Context().Done(): + // We don't want to watch a transfer that has been cancelled. + // Wait for it to be removed from the map and try again. + xfer.Release(watcher) + tm.mu.Unlock() + // The goroutine that removes this transfer from the + // map is also waiting for xfer.Done(), so yield to it. + // This could be avoided by adding a Closed method + // to Transfer to allow explicitly waiting for it to be + // removed the map, but forcing a scheduling round in + // this very rare case seems better than bloating the + // interface definition. + runtime.Gosched() + <-xfer.Done() + tm.mu.Lock() + default: + return xfer, watcher + } + } + + start := make(chan struct{}) + inactive := make(chan struct{}) + + if tm.concurrencyLimit == 0 || tm.activeTransfers < tm.concurrencyLimit { + close(start) + tm.activeTransfers++ + } else { + tm.waitingTransfers = append(tm.waitingTransfers, start) + } + + masterProgressChan := make(chan progress.Progress) + xfer := xferFunc(masterProgressChan, start, inactive) + watcher := xfer.Watch(progressOutput) + go xfer.Broadcast(masterProgressChan) + tm.transfers[key] = xfer + + // When the transfer is finished, remove from the map. + go func() { + for { + select { + case <-inactive: + tm.mu.Lock() + tm.inactivate(start) + tm.mu.Unlock() + inactive = nil + case <-xfer.Done(): + tm.mu.Lock() + if inactive != nil { + tm.inactivate(start) + } + delete(tm.transfers, key) + tm.mu.Unlock() + xfer.Close() + return + } + } + }() + + return xfer, watcher +} + +func (tm *transferManager) inactivate(start chan struct{}) { + // If the transfer was started, remove it from the activeTransfers + // count. 
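+	// (The non-blocking receive below succeeds only if start was closed,
+	// that is, only if this transfer ever actually held a concurrency slot.)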
+ select { + case <-start: + // Start next transfer if any are waiting + if len(tm.waitingTransfers) != 0 { + close(tm.waitingTransfers[0]) + tm.waitingTransfers = tm.waitingTransfers[1:] + } else { + tm.activeTransfers-- + } + default: + } +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/distribution/xfer/upload.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/distribution/xfer/upload.go new file mode 100644 index 000000000..33b45ad74 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/distribution/xfer/upload.go @@ -0,0 +1,174 @@ +package xfer // import "github.com/docker/docker/distribution/xfer" + +import ( + "context" + "errors" + "time" + + "github.com/docker/distribution" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/progress" + "github.com/sirupsen/logrus" +) + +const maxUploadAttempts = 5 + +// LayerUploadManager provides task management and progress reporting for +// uploads. +type LayerUploadManager struct { + tm TransferManager + waitDuration time.Duration +} + +// SetConcurrency sets the max concurrent uploads for each push +func (lum *LayerUploadManager) SetConcurrency(concurrency int) { + lum.tm.SetConcurrency(concurrency) +} + +// NewLayerUploadManager returns a new LayerUploadManager. +func NewLayerUploadManager(concurrencyLimit int, options ...func(*LayerUploadManager)) *LayerUploadManager { + manager := LayerUploadManager{ + tm: NewTransferManager(concurrencyLimit), + waitDuration: time.Second, + } + for _, option := range options { + option(&manager) + } + return &manager +} + +type uploadTransfer struct { + Transfer + + remoteDescriptor distribution.Descriptor + err error +} + +// An UploadDescriptor references a layer that may need to be uploaded. +type UploadDescriptor interface { + // Key returns the key used to deduplicate uploads. + Key() string + // ID returns the ID for display purposes. + ID() string + // DiffID should return the DiffID for this layer. + DiffID() layer.DiffID + // Upload is called to perform the Upload. + Upload(ctx context.Context, progressOutput progress.Output) (distribution.Descriptor, error) + // SetRemoteDescriptor provides the distribution.Descriptor that was + // returned by Upload. This descriptor is not to be confused with + // the UploadDescriptor interface, which is used for internally + // identifying layers that are being uploaded. + SetRemoteDescriptor(descriptor distribution.Descriptor) +} + +// Upload is a blocking function which ensures the listed layers are present on +// the remote registry. It uses the string returned by the Key method to +// deduplicate uploads. 
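+//
+// An illustrative call (a sketch; descriptors is the caller's
+// []UploadDescriptor):
+//
+//	if err := lum.Upload(ctx, descriptors, progressOutput); err != nil {
+//		return err
+//	}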
+func (lum *LayerUploadManager) Upload(ctx context.Context, layers []UploadDescriptor, progressOutput progress.Output) error { + var ( + uploads []*uploadTransfer + dedupDescriptors = make(map[string]*uploadTransfer) + ) + + for _, descriptor := range layers { + progress.Update(progressOutput, descriptor.ID(), "Preparing") + + key := descriptor.Key() + if _, present := dedupDescriptors[key]; present { + continue + } + + xferFunc := lum.makeUploadFunc(descriptor) + upload, watcher := lum.tm.Transfer(descriptor.Key(), xferFunc, progressOutput) + defer upload.Release(watcher) + uploads = append(uploads, upload.(*uploadTransfer)) + dedupDescriptors[key] = upload.(*uploadTransfer) + } + + for _, upload := range uploads { + select { + case <-ctx.Done(): + return ctx.Err() + case <-upload.Transfer.Done(): + if upload.err != nil { + return upload.err + } + } + } + for _, l := range layers { + l.SetRemoteDescriptor(dedupDescriptors[l.Key()].remoteDescriptor) + } + + return nil +} + +func (lum *LayerUploadManager) makeUploadFunc(descriptor UploadDescriptor) DoFunc { + return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { + u := &uploadTransfer{ + Transfer: NewTransfer(), + } + + go func() { + defer func() { + close(progressChan) + }() + + progressOutput := progress.ChanOutput(progressChan) + + select { + case <-start: + default: + progress.Update(progressOutput, descriptor.ID(), "Waiting") + <-start + } + + retries := 0 + for { + remoteDescriptor, err := descriptor.Upload(u.Transfer.Context(), progressOutput) + if err == nil { + u.remoteDescriptor = remoteDescriptor + break + } + + // If an error was returned because the context + // was cancelled, we shouldn't retry. + select { + case <-u.Transfer.Context().Done(): + u.err = err + return + default: + } + + retries++ + if _, isDNR := err.(DoNotRetry); isDNR || retries == maxUploadAttempts { + logrus.Errorf("Upload failed: %v", err) + u.err = err + return + } + + logrus.Errorf("Upload failed, retrying: %v", err) + delay := retries * 5 + ticker := time.NewTicker(lum.waitDuration) + + selectLoop: + for { + progress.Updatef(progressOutput, descriptor.ID(), "Retrying in %d second%s", delay, (map[bool]string{true: "s"})[delay != 1]) + select { + case <-ticker.C: + delay-- + if delay == 0 { + ticker.Stop() + break selectLoop + } + case <-u.Transfer.Context().Done(): + ticker.Stop() + u.err = errors.New("upload cancelled during retry delay") + return + } + } + } + }() + + return u + } +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/dockerversion/useragent.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/dockerversion/useragent.go new file mode 100644 index 000000000..afbdcd858 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/dockerversion/useragent.go @@ -0,0 +1,76 @@ +package dockerversion // import "github.com/docker/docker/dockerversion" + +import ( + "context" + "fmt" + "runtime" + + "github.com/docker/docker/pkg/parsers/kernel" + "github.com/docker/docker/pkg/useragent" +) + +// UAStringKey is used as key type for user-agent string in net/context struct +type UAStringKey struct{} + +// DockerUserAgent is the User-Agent the Docker client uses to identify itself. 
+// In accordance with RFC 7231 (5.5.3) is of the form: +// [docker client's UA] UpstreamClient([upstream client's UA]) +func DockerUserAgent(ctx context.Context) string { + httpVersion := make([]useragent.VersionInfo, 0, 6) + httpVersion = append(httpVersion, useragent.VersionInfo{Name: "docker", Version: Version}) + httpVersion = append(httpVersion, useragent.VersionInfo{Name: "go", Version: runtime.Version()}) + httpVersion = append(httpVersion, useragent.VersionInfo{Name: "git-commit", Version: GitCommit}) + if kernelVersion, err := kernel.GetKernelVersion(); err == nil { + httpVersion = append(httpVersion, useragent.VersionInfo{Name: "kernel", Version: kernelVersion.String()}) + } + httpVersion = append(httpVersion, useragent.VersionInfo{Name: "os", Version: runtime.GOOS}) + httpVersion = append(httpVersion, useragent.VersionInfo{Name: "arch", Version: runtime.GOARCH}) + + dockerUA := useragent.AppendVersions("", httpVersion...) + upstreamUA := getUserAgentFromContext(ctx) + if len(upstreamUA) > 0 { + ret := insertUpstreamUserAgent(upstreamUA, dockerUA) + return ret + } + return dockerUA +} + +// getUserAgentFromContext returns the previously saved user-agent context stored in ctx, if one exists +func getUserAgentFromContext(ctx context.Context) string { + var upstreamUA string + if ctx != nil { + var ki interface{} = ctx.Value(UAStringKey{}) + if ki != nil { + upstreamUA = ctx.Value(UAStringKey{}).(string) + } + } + return upstreamUA +} + +// escapeStr returns s with every rune in charsToEscape escaped by a backslash +func escapeStr(s string, charsToEscape string) string { + var ret string + for _, currRune := range s { + appended := false + for _, escapableRune := range charsToEscape { + if currRune == escapableRune { + ret += `\` + string(currRune) + appended = true + break + } + } + if !appended { + ret += string(currRune) + } + } + return ret +} + +// insertUpstreamUserAgent adds the upstream client useragent to create a user-agent +// string of the form: +// $dockerUA UpstreamClient($upstreamUA) +func insertUpstreamUserAgent(upstreamUA string, dockerUA string) string { + charsToEscape := `();\` + upstreamUAEscaped := escapeStr(upstreamUA, charsToEscape) + return fmt.Sprintf("%s UpstreamClient(%s)", dockerUA, upstreamUAEscaped) +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/dockerversion/version_lib.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/dockerversion/version_lib.go new file mode 100644 index 000000000..b7d465044 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/dockerversion/version_lib.go @@ -0,0 +1,17 @@ +// +build !autogen + +// Package dockerversion is auto-generated at build-time +package dockerversion // import "github.com/docker/docker/dockerversion" + +// Default build-time variable for library-import. +// This file is overridden on build with build-time information. 
+const ( + GitCommit = "library-import" + Version = "library-import" + BuildTime = "library-import" + IAmStatic = "library-import" + InitCommitID = "library-import" + PlatformName = "" + ProductName = "" + DefaultProductLicense = "" +) diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/errdefs/defs.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/errdefs/defs.go index e6a2275b2..61e7456b4 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/errdefs/defs.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/errdefs/defs.go @@ -43,11 +43,6 @@ type ErrNotModified interface { NotModified() } -// ErrAlreadyExists is a special case of ErrConflict which signals that the desired object already exists -type ErrAlreadyExists interface { - AlreadyExists() -} - // ErrNotImplemented signals that the requested action/feature is not implemented on the system as configured. type ErrNotImplemented interface { NotImplemented() diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/errdefs/helpers.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/errdefs/helpers.go index 6169c2bc6..c9916e013 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/errdefs/helpers.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/errdefs/helpers.go @@ -12,8 +12,8 @@ func (e errNotFound) Cause() error { // NotFound is a helper to create an error of the class with the same name from any error type func NotFound(err error) error { - if err == nil { - return nil + if err == nil || IsNotFound(err) { + return err } return errNotFound{err} } @@ -28,8 +28,8 @@ func (e errInvalidParameter) Cause() error { // InvalidParameter is a helper to create an error of the class with the same name from any error type func InvalidParameter(err error) error { - if err == nil { - return nil + if err == nil || IsInvalidParameter(err) { + return err } return errInvalidParameter{err} } @@ -44,8 +44,8 @@ func (e errConflict) Cause() error { // Conflict is a helper to create an error of the class with the same name from any error type func Conflict(err error) error { - if err == nil { - return nil + if err == nil || IsConflict(err) { + return err } return errConflict{err} } @@ -60,8 +60,8 @@ func (e errUnauthorized) Cause() error { // Unauthorized is a helper to create an error of the class with the same name from any error type func Unauthorized(err error) error { - if err == nil { - return nil + if err == nil || IsUnauthorized(err) { + return err } return errUnauthorized{err} } @@ -76,6 +76,9 @@ func (e errUnavailable) Cause() error { // Unavailable is a helper to create an error of the class with the same name from any error type func Unavailable(err error) error { + if err == nil || IsUnavailable(err) { + return err + } return errUnavailable{err} } @@ -89,8 +92,8 @@ func (e errForbidden) Cause() error { // Forbidden is a helper to create an error of the class with the same name from any error type func Forbidden(err error) error { - if err == nil { - return nil + if err == nil || IsForbidden(err) { + return err } return errForbidden{err} } @@ -105,8 +108,8 @@ func (e errSystem) Cause() error { // System is a helper to create an error of the class with the same name from any error type func System(err error) error { - if err == nil { - return nil + if err == nil || IsSystem(err) { + return err } return errSystem{err} } @@ -121,28 +124,12 @@ func (e errNotModified) Cause() error { // NotModified is a helper to create an error of the class with the same name from any error type func NotModified(err 
error) error { - if err == nil { - return nil + if err == nil || IsNotModified(err) { + return err } return errNotModified{err} } -type errAlreadyExists struct{ error } - -func (errAlreadyExists) AlreadyExists() {} - -func (e errAlreadyExists) Cause() error { - return e.error -} - -// AlreadyExists is a helper to create an error of the class with the same name from any error type -func AlreadyExists(err error) error { - if err == nil { - return nil - } - return errAlreadyExists{err} -} - type errNotImplemented struct{ error } func (errNotImplemented) NotImplemented() {} @@ -153,8 +140,8 @@ func (e errNotImplemented) Cause() error { // NotImplemented is a helper to create an error of the class with the same name from any error type func NotImplemented(err error) error { - if err == nil { - return nil + if err == nil || IsNotImplemented(err) { + return err } return errNotImplemented{err} } @@ -169,8 +156,8 @@ func (e errUnknown) Cause() error { // Unknown is a helper to create an error of the class with the same name from any error type func Unknown(err error) error { - if err == nil { - return nil + if err == nil || IsUnknown(err) { + return err } return errUnknown{err} } @@ -185,8 +172,8 @@ func (e errCancelled) Cause() error { // Cancelled is a helper to create an error of the class with the same name from any error type func Cancelled(err error) error { - if err == nil { - return nil + if err == nil || IsCancelled(err) { + return err } return errCancelled{err} } @@ -201,8 +188,8 @@ func (e errDeadline) Cause() error { // Deadline is a helper to create an error of the class with the same name from any error type func Deadline(err error) error { - if err == nil { - return nil + if err == nil || IsDeadline(err) { + return err } return errDeadline{err} } @@ -217,8 +204,8 @@ func (e errDataLoss) Cause() error { // DataLoss is a helper to create an error of the class with the same name from any error type func DataLoss(err error) error { - if err == nil { - return nil + if err == nil || IsDataLoss(err) { + return err } return errDataLoss{err} } diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/errdefs/http_helpers.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/errdefs/http_helpers.go new file mode 100644 index 000000000..1debd2ae0 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/errdefs/http_helpers.go @@ -0,0 +1,198 @@ +package errdefs // import "github.com/docker/docker/errdefs" + +import ( + "fmt" + "net/http" + + containerderrors "github.com/containerd/containerd/errdefs" + "github.com/docker/distribution/registry/api/errcode" + "github.com/sirupsen/logrus" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// GetHTTPErrorStatusCode retrieves status code from error message. +func GetHTTPErrorStatusCode(err error) int { + if err == nil { + logrus.WithFields(logrus.Fields{"error": err}).Error("unexpected HTTP error handling") + return http.StatusInternalServerError + } + + var statusCode int + + // Stop right there + // Are you sure you should be adding a new error class here? Do one of the existing ones work? + + // Note that the below functions are already checking the error causal chain for matches. 
+	switch {
+	case IsNotFound(err):
+		statusCode = http.StatusNotFound
+	case IsInvalidParameter(err):
+		statusCode = http.StatusBadRequest
+	case IsConflict(err):
+		statusCode = http.StatusConflict
+	case IsUnauthorized(err):
+		statusCode = http.StatusUnauthorized
+	case IsUnavailable(err):
+		statusCode = http.StatusServiceUnavailable
+	case IsForbidden(err):
+		statusCode = http.StatusForbidden
+	case IsNotModified(err):
+		statusCode = http.StatusNotModified
+	case IsNotImplemented(err):
+		statusCode = http.StatusNotImplemented
+	case IsSystem(err) || IsUnknown(err) || IsDataLoss(err) || IsDeadline(err) || IsCancelled(err):
+		statusCode = http.StatusInternalServerError
+	default:
+		statusCode = statusCodeFromGRPCError(err)
+		if statusCode != http.StatusInternalServerError {
+			return statusCode
+		}
+		statusCode = statusCodeFromContainerdError(err)
+		if statusCode != http.StatusInternalServerError {
+			return statusCode
+		}
+		statusCode = statusCodeFromDistributionError(err)
+		if statusCode != http.StatusInternalServerError {
+			return statusCode
+		}
+		if e, ok := err.(causer); ok {
+			return GetHTTPErrorStatusCode(e.Cause())
+		}
+
+		logrus.WithFields(logrus.Fields{
+			"module":     "api",
+			"error_type": fmt.Sprintf("%T", err),
+		}).Debugf("FIXME: Got an API error which does not match any expected type!!!: %+v", err)
+	}
+
+	if statusCode == 0 {
+		statusCode = http.StatusInternalServerError
+	}
+
+	return statusCode
+}
+
+// FromStatusCode creates an errdef error, based on the provided HTTP status-code
+func FromStatusCode(err error, statusCode int) error {
+	if err == nil {
+		return err
+	}
+	switch statusCode {
+	case http.StatusNotFound:
+		err = NotFound(err)
+	case http.StatusBadRequest:
+		err = InvalidParameter(err)
+	case http.StatusConflict:
+		err = Conflict(err)
+	case http.StatusUnauthorized:
+		err = Unauthorized(err)
+	case http.StatusServiceUnavailable:
+		err = Unavailable(err)
+	case http.StatusForbidden:
+		err = Forbidden(err)
+	case http.StatusNotModified:
+		err = NotModified(err)
+	case http.StatusNotImplemented:
+		err = NotImplemented(err)
+	case http.StatusInternalServerError:
+		if !IsSystem(err) && !IsUnknown(err) && !IsDataLoss(err) && !IsDeadline(err) && !IsCancelled(err) {
+			err = System(err)
+		}
+	default:
+		logrus.WithFields(logrus.Fields{
+			"module":      "api",
+			"status_code": fmt.Sprintf("%d", statusCode),
+		}).Debugf("FIXME: Got a status-code for which the error does not match any expected type!!!: %d", statusCode)
+
+		switch {
+		case statusCode >= 200 && statusCode < 400:
+			// 2xx and 3xx indicate success or redirect, not an error; leave err unchanged
+		case statusCode >= 400 && statusCode < 500:
+			err = InvalidParameter(err)
+		case statusCode >= 500 && statusCode < 600:
+			err = System(err)
+		default:
+			err = Unknown(err)
+		}
+	}
+	return err
+}
+
+// statusCodeFromGRPCError returns status code according to gRPC error
+func statusCodeFromGRPCError(err error) int {
+	switch status.Code(err) {
+	case codes.InvalidArgument: // code 3
+		return http.StatusBadRequest
+	case codes.NotFound: // code 5
+		return http.StatusNotFound
+	case codes.AlreadyExists: // code 6
+		return http.StatusConflict
+	case codes.PermissionDenied: // code 7
+		return http.StatusForbidden
+	case codes.FailedPrecondition: // code 9
+		return http.StatusBadRequest
+	case codes.Unauthenticated: // code 16
+		return http.StatusUnauthorized
+	case codes.OutOfRange: // code 11
+		return http.StatusBadRequest
+	case codes.Unimplemented: // code 12
+		return http.StatusNotImplemented
+	case codes.Unavailable: // code 14
+		return http.StatusServiceUnavailable
+	default:
+		if e, ok := err.(causer); ok {
+			return statusCodeFromGRPCError(e.Cause())
+		}
+		// codes.Canceled(1)
+		// codes.Unknown(2)
+		// codes.DeadlineExceeded(4)
+		// codes.ResourceExhausted(8)
+		// codes.Aborted(10)
+		// codes.Internal(13)
+		// codes.DataLoss(15)
+		return http.StatusInternalServerError
+	}
+}
+
+// statusCodeFromDistributionError returns status code according to registry errcode
+// code is loosely based on errcode.ServeJSON() in docker/distribution
+func statusCodeFromDistributionError(err error) int {
+	switch errs := err.(type) {
+	case errcode.Errors:
+		if len(errs) < 1 {
+			return http.StatusInternalServerError
+		}
+		if _, ok := errs[0].(errcode.ErrorCoder); ok {
+			return statusCodeFromDistributionError(errs[0])
+		}
+	case errcode.ErrorCoder:
+		return errs.ErrorCode().Descriptor().HTTPStatusCode
+	default:
+		if e, ok := err.(causer); ok {
+			return statusCodeFromDistributionError(e.Cause())
+		}
+	}
+	return http.StatusInternalServerError
+}
+
+// statusCodeFromContainerdError returns status code for containerd errors when
+// consumed directly (not through gRPC)
+func statusCodeFromContainerdError(err error) int {
+	switch {
+	case containerderrors.IsInvalidArgument(err):
+		return http.StatusBadRequest
+	case containerderrors.IsNotFound(err):
+		return http.StatusNotFound
+	case containerderrors.IsAlreadyExists(err):
+		return http.StatusConflict
+	case containerderrors.IsFailedPrecondition(err):
+		return http.StatusPreconditionFailed
+	case containerderrors.IsUnavailable(err):
+		return http.StatusServiceUnavailable
+	case containerderrors.IsNotImplemented(err):
+		return http.StatusNotImplemented
+	default:
+		return http.StatusInternalServerError
+	}
+}
diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/errdefs/is.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/errdefs/is.go
index e0513331b..3abf07d0c 100644
--- a/src/cmd/linuxkit/vendor/github.com/docker/docker/errdefs/is.go
+++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/errdefs/is.go
@@ -15,7 +15,6 @@ func getImplementer(err error) error {
 		ErrForbidden,
 		ErrSystem,
 		ErrNotModified,
-		ErrAlreadyExists,
 		ErrNotImplemented,
 		ErrCancelled,
 		ErrDeadline,
@@ -77,12 +76,6 @@ func IsNotModified(err error) bool {
 	return ok
 }
 
-// IsAlreadyExists returns if the passed in error is a AlreadyExists error
-func IsAlreadyExists(err error) bool {
-	_, ok := getImplementer(err).(ErrAlreadyExists)
-	return ok
-}
-
 // IsNotImplemented returns if the passed in error is an ErrNotImplemented
 func IsNotImplemented(err error) bool {
 	_, ok := getImplementer(err).(ErrNotImplemented)
diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/image/fs.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/image/fs.go
new file mode 100644
index 000000000..7080c8c01
--- /dev/null
+++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/image/fs.go
@@ -0,0 +1,175 @@
+package image // import "github.com/docker/docker/image"
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"sync"
+
+	"github.com/docker/docker/pkg/ioutils"
+	"github.com/opencontainers/go-digest"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+)
+
+// DigestWalkFunc is a function called by StoreBackend.Walk
+type DigestWalkFunc func(id digest.Digest) error
+
+// StoreBackend provides an interface for image.Store persistence
+type StoreBackend interface {
+	Walk(f DigestWalkFunc) error
+	Get(id digest.Digest) ([]byte, error)
+	Set(data []byte) (digest.Digest, error)
+	Delete(id digest.Digest) error
+	SetMetadata(id digest.Digest, key string, data []byte)
error + GetMetadata(id digest.Digest, key string) ([]byte, error) + DeleteMetadata(id digest.Digest, key string) error +} + +// fs implements StoreBackend using the filesystem. +type fs struct { + sync.RWMutex + root string +} + +const ( + contentDirName = "content" + metadataDirName = "metadata" +) + +// NewFSStoreBackend returns new filesystem based backend for image.Store +func NewFSStoreBackend(root string) (StoreBackend, error) { + return newFSStore(root) +} + +func newFSStore(root string) (*fs, error) { + s := &fs{ + root: root, + } + if err := os.MkdirAll(filepath.Join(root, contentDirName, string(digest.Canonical)), 0700); err != nil { + return nil, errors.Wrap(err, "failed to create storage backend") + } + if err := os.MkdirAll(filepath.Join(root, metadataDirName, string(digest.Canonical)), 0700); err != nil { + return nil, errors.Wrap(err, "failed to create storage backend") + } + return s, nil +} + +func (s *fs) contentFile(dgst digest.Digest) string { + return filepath.Join(s.root, contentDirName, string(dgst.Algorithm()), dgst.Hex()) +} + +func (s *fs) metadataDir(dgst digest.Digest) string { + return filepath.Join(s.root, metadataDirName, string(dgst.Algorithm()), dgst.Hex()) +} + +// Walk calls the supplied callback for each image ID in the storage backend. +func (s *fs) Walk(f DigestWalkFunc) error { + // Only Canonical digest (sha256) is currently supported + s.RLock() + dir, err := ioutil.ReadDir(filepath.Join(s.root, contentDirName, string(digest.Canonical))) + s.RUnlock() + if err != nil { + return err + } + for _, v := range dir { + dgst := digest.NewDigestFromHex(string(digest.Canonical), v.Name()) + if err := dgst.Validate(); err != nil { + logrus.Debugf("skipping invalid digest %s: %s", dgst, err) + continue + } + if err := f(dgst); err != nil { + return err + } + } + return nil +} + +// Get returns the content stored under a given digest. +func (s *fs) Get(dgst digest.Digest) ([]byte, error) { + s.RLock() + defer s.RUnlock() + + return s.get(dgst) +} + +func (s *fs) get(dgst digest.Digest) ([]byte, error) { + content, err := ioutil.ReadFile(s.contentFile(dgst)) + if err != nil { + return nil, errors.Wrapf(err, "failed to get digest %s", dgst) + } + + // todo: maybe optional + if digest.FromBytes(content) != dgst { + return nil, fmt.Errorf("failed to verify: %v", dgst) + } + + return content, nil +} + +// Set stores content by checksum. +func (s *fs) Set(data []byte) (digest.Digest, error) { + s.Lock() + defer s.Unlock() + + if len(data) == 0 { + return "", fmt.Errorf("invalid empty data") + } + + dgst := digest.FromBytes(data) + if err := ioutils.AtomicWriteFile(s.contentFile(dgst), data, 0600); err != nil { + return "", errors.Wrap(err, "failed to write digest data") + } + + return dgst, nil +} + +// Delete removes content and metadata files associated with the digest. +func (s *fs) Delete(dgst digest.Digest) error { + s.Lock() + defer s.Unlock() + + if err := os.RemoveAll(s.metadataDir(dgst)); err != nil { + return err + } + return os.Remove(s.contentFile(dgst)) +} + +// SetMetadata sets metadata for a given ID. It fails if there's no base file. 
+func (s *fs) SetMetadata(dgst digest.Digest, key string, data []byte) error { + s.Lock() + defer s.Unlock() + if _, err := s.get(dgst); err != nil { + return err + } + + baseDir := filepath.Join(s.metadataDir(dgst)) + if err := os.MkdirAll(baseDir, 0700); err != nil { + return err + } + return ioutils.AtomicWriteFile(filepath.Join(s.metadataDir(dgst), key), data, 0600) +} + +// GetMetadata returns metadata for a given digest. +func (s *fs) GetMetadata(dgst digest.Digest, key string) ([]byte, error) { + s.RLock() + defer s.RUnlock() + + if _, err := s.get(dgst); err != nil { + return nil, err + } + bytes, err := ioutil.ReadFile(filepath.Join(s.metadataDir(dgst), key)) + if err != nil { + return nil, errors.Wrap(err, "failed to read metadata") + } + return bytes, nil +} + +// DeleteMetadata removes the metadata associated with a digest. +func (s *fs) DeleteMetadata(dgst digest.Digest, key string) error { + s.Lock() + defer s.Unlock() + + return os.RemoveAll(filepath.Join(s.metadataDir(dgst), key)) +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/image/image.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/image/image.go new file mode 100644 index 000000000..079ecb813 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/image/image.go @@ -0,0 +1,232 @@ +package image // import "github.com/docker/docker/image" + +import ( + "encoding/json" + "errors" + "io" + "runtime" + "strings" + "time" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/layer" + "github.com/opencontainers/go-digest" +) + +// ID is the content-addressable ID of an image. +type ID digest.Digest + +func (id ID) String() string { + return id.Digest().String() +} + +// Digest converts ID into a digest +func (id ID) Digest() digest.Digest { + return digest.Digest(id) +} + +// IDFromDigest creates an ID from a digest +func IDFromDigest(digest digest.Digest) ID { + return ID(digest) +} + +// V1Image stores the V1 image configuration. 
+type V1Image struct { + // ID is a unique 64 character identifier of the image + ID string `json:"id,omitempty"` + // Parent is the ID of the parent image + Parent string `json:"parent,omitempty"` + // Comment is the commit message that was set when committing the image + Comment string `json:"comment,omitempty"` + // Created is the timestamp at which the image was created + Created time.Time `json:"created"` + // Container is the id of the container used to commit + Container string `json:"container,omitempty"` + // ContainerConfig is the configuration of the container that is committed into the image + ContainerConfig container.Config `json:"container_config,omitempty"` + // DockerVersion specifies the version of Docker that was used to build the image + DockerVersion string `json:"docker_version,omitempty"` + // Author is the name of the author that was specified when committing the image + Author string `json:"author,omitempty"` + // Config is the configuration of the container received from the client + Config *container.Config `json:"config,omitempty"` + // Architecture is the hardware that the image is built and runs on + Architecture string `json:"architecture,omitempty"` + // OS is the operating system used to build and run the image + OS string `json:"os,omitempty"` + // Size is the total size of the image including all layers it is composed of + Size int64 `json:",omitempty"` +} + +// Image stores the image configuration +type Image struct { + V1Image + Parent ID `json:"parent,omitempty"` + RootFS *RootFS `json:"rootfs,omitempty"` + History []History `json:"history,omitempty"` + OSVersion string `json:"os.version,omitempty"` + OSFeatures []string `json:"os.features,omitempty"` + + // rawJSON caches the immutable JSON associated with this image. + rawJSON []byte + + // computedID is the ID computed from the hash of the image config. + // Not to be confused with the legacy V1 ID in V1Image. + computedID ID +} + +// RawJSON returns the immutable JSON associated with the image. +func (img *Image) RawJSON() []byte { + return img.rawJSON +} + +// ID returns the image's content-addressable ID. +func (img *Image) ID() ID { + return img.computedID +} + +// ImageID stringifies ID. +func (img *Image) ImageID() string { + return img.ID().String() +} + +// RunConfig returns the image's container config. +func (img *Image) RunConfig() *container.Config { + return img.Config +} + +// BaseImgArch returns the image's architecture. If not populated, defaults to the host runtime arch. +func (img *Image) BaseImgArch() string { + arch := img.Architecture + if arch == "" { + arch = runtime.GOARCH + } + return arch +} + +// OperatingSystem returns the image's operating system. If not populated, defaults to the host runtime OS. +func (img *Image) OperatingSystem() string { + os := img.OS + if os == "" { + os = runtime.GOOS + } + return os +} + +// MarshalJSON serializes the image to JSON. It sorts the top-level keys so +// that JSON that's been manipulated by a push/pull cycle with a legacy +// registry won't end up with a different key order. +func (img *Image) MarshalJSON() ([]byte, error) { + type MarshalImage Image + + pass1, err := json.Marshal(MarshalImage(*img)) + if err != nil { + return nil, err + } + + var c map[string]*json.RawMessage + if err := json.Unmarshal(pass1, &c); err != nil { + return nil, err + } + return json.Marshal(c) +} + +// ChildConfig is the configuration to apply to an Image to create a new +// Child image. Other properties of the image are copied from the parent. 
+type ChildConfig struct { + ContainerID string + Author string + Comment string + DiffID layer.DiffID + ContainerConfig *container.Config + Config *container.Config +} + +// NewChildImage creates a new Image as a child of this image. +func NewChildImage(img *Image, child ChildConfig, os string) *Image { + isEmptyLayer := layer.IsEmpty(child.DiffID) + var rootFS *RootFS + if img.RootFS != nil { + rootFS = img.RootFS.Clone() + } else { + rootFS = NewRootFS() + } + + if !isEmptyLayer { + rootFS.Append(child.DiffID) + } + imgHistory := NewHistory( + child.Author, + child.Comment, + strings.Join(child.ContainerConfig.Cmd, " "), + isEmptyLayer) + + return &Image{ + V1Image: V1Image{ + DockerVersion: dockerversion.Version, + Config: child.Config, + Architecture: img.BaseImgArch(), + OS: os, + Container: child.ContainerID, + ContainerConfig: *child.ContainerConfig, + Author: child.Author, + Created: imgHistory.Created, + }, + RootFS: rootFS, + History: append(img.History, imgHistory), + OSFeatures: img.OSFeatures, + OSVersion: img.OSVersion, + } +} + +// History stores build commands that were used to create an image +type History struct { + // Created is the timestamp at which the image was created + Created time.Time `json:"created"` + // Author is the name of the author that was specified when committing the image + Author string `json:"author,omitempty"` + // CreatedBy keeps the Dockerfile command used while building the image + CreatedBy string `json:"created_by,omitempty"` + // Comment is the commit message that was set when committing the image + Comment string `json:"comment,omitempty"` + // EmptyLayer is set to true if this history item did not generate a + // layer. Otherwise, the history item is associated with the next + // layer in the RootFS section. + EmptyLayer bool `json:"empty_layer,omitempty"` +} + +// NewHistory creates a new history struct from arguments, and sets the created +// time to the current time in UTC +func NewHistory(author, comment, createdBy string, isEmptyLayer bool) History { + return History{ + Author: author, + Created: time.Now().UTC(), + CreatedBy: createdBy, + Comment: comment, + EmptyLayer: isEmptyLayer, + } +} + +// Exporter provides interface for loading and saving images +type Exporter interface { + Load(io.ReadCloser, io.Writer, bool) error + // TODO: Load(net.Context, io.ReadCloser, <- chan StatusMessage) error + Save([]string, io.Writer) error +} + +// NewFromJSON creates an Image configuration from json. +func NewFromJSON(src []byte) (*Image, error) { + img := &Image{} + + if err := json.Unmarshal(src, img); err != nil { + return nil, err + } + if img.RootFS == nil { + return nil, errors.New("invalid image JSON, no RootFS key") + } + + img.rawJSON = src + + return img, nil +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/image/rootfs.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/image/rootfs.go new file mode 100644 index 000000000..f73a0660f --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/image/rootfs.go @@ -0,0 +1,53 @@ +package image // import "github.com/docker/docker/image" + +import ( + "runtime" + + "github.com/docker/docker/layer" + "github.com/sirupsen/logrus" +) + +// TypeLayers is used for RootFS.Type for filesystems organized into layers. +const TypeLayers = "layers" + +// typeLayersWithBase is an older format used by Windows up to v1.12. 
We
+// explicitly handle this as an error case to ensure that a daemon which still
+// has an older image like this on disk can still start, even though the
+// image itself is not usable. See https://github.com/docker/docker/pull/25806.
+const typeLayersWithBase = "layers+base"
+
+// RootFS describes an image's root filesystem.
+// This is currently a placeholder that only supports layers. In the future
+// this can be made into an interface that supports different implementations.
+type RootFS struct {
+	Type    string         `json:"type"`
+	DiffIDs []layer.DiffID `json:"diff_ids,omitempty"`
+}
+
+// NewRootFS returns empty RootFS struct
+func NewRootFS() *RootFS {
+	return &RootFS{Type: TypeLayers}
+}
+
+// Append appends a new diffID to rootfs
+func (r *RootFS) Append(id layer.DiffID) {
+	r.DiffIDs = append(r.DiffIDs, id)
+}
+
+// Clone returns a copy of the RootFS
+func (r *RootFS) Clone() *RootFS {
+	newRoot := NewRootFS()
+	newRoot.Type = r.Type
+	newRoot.DiffIDs = make([]layer.DiffID, len(r.DiffIDs))
+	copy(newRoot.DiffIDs, r.DiffIDs)
+	return newRoot
+}
+
+// ChainID returns the ChainID for the top layer in RootFS.
+func (r *RootFS) ChainID() layer.ChainID {
+	if runtime.GOOS == "windows" && r.Type == typeLayersWithBase {
+		logrus.Warnf("Layer type is unsupported on this platform. DiffIDs: '%v'", r.DiffIDs)
+		return ""
+	}
+	return layer.CreateChainID(r.DiffIDs)
+}
diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/image/store.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/image/store.go
new file mode 100644
index 000000000..1a8a8a245
--- /dev/null
+++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/image/store.go
@@ -0,0 +1,346 @@
+package image // import "github.com/docker/docker/image"
+
+import (
+	"encoding/json"
+	"fmt"
+	"sync"
+	"time"
+
+	"github.com/docker/distribution/digestset"
+	"github.com/docker/docker/layer"
+	"github.com/docker/docker/pkg/system"
+	"github.com/opencontainers/go-digest"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+)
+
+// Store is an interface for creating and accessing images
+type Store interface {
+	Create(config []byte) (ID, error)
+	Get(id ID) (*Image, error)
+	Delete(id ID) ([]layer.Metadata, error)
+	Search(partialID string) (ID, error)
+	SetParent(id ID, parent ID) error
+	GetParent(id ID) (ID, error)
+	SetLastUpdated(id ID) error
+	GetLastUpdated(id ID) (time.Time, error)
+	Children(id ID) []ID
+	Map() map[ID]*Image
+	Heads() map[ID]*Image
+	Len() int
+}
+
+// LayerGetReleaser is a minimal interface for getting and releasing layers.
+type LayerGetReleaser interface { + Get(layer.ChainID) (layer.Layer, error) + Release(layer.Layer) ([]layer.Metadata, error) +} + +type imageMeta struct { + layer layer.Layer + children map[ID]struct{} +} + +type store struct { + sync.RWMutex + lss map[string]LayerGetReleaser + images map[ID]*imageMeta + fs StoreBackend + digestSet *digestset.Set +} + +// NewImageStore returns new store object for given set of layer stores +func NewImageStore(fs StoreBackend, lss map[string]LayerGetReleaser) (Store, error) { + is := &store{ + lss: lss, + images: make(map[ID]*imageMeta), + fs: fs, + digestSet: digestset.NewSet(), + } + + // load all current images and retain layers + if err := is.restore(); err != nil { + return nil, err + } + + return is, nil +} + +func (is *store) restore() error { + err := is.fs.Walk(func(dgst digest.Digest) error { + img, err := is.Get(IDFromDigest(dgst)) + if err != nil { + logrus.Errorf("invalid image %v, %v", dgst, err) + return nil + } + var l layer.Layer + if chainID := img.RootFS.ChainID(); chainID != "" { + if !system.IsOSSupported(img.OperatingSystem()) { + logrus.Errorf("not restoring image with unsupported operating system %v, %v, %s", dgst, chainID, img.OperatingSystem()) + return nil + } + l, err = is.lss[img.OperatingSystem()].Get(chainID) + if err != nil { + if err == layer.ErrLayerDoesNotExist { + logrus.Errorf("layer does not exist, not restoring image %v, %v, %s", dgst, chainID, img.OperatingSystem()) + return nil + } + return err + } + } + if err := is.digestSet.Add(dgst); err != nil { + return err + } + + imageMeta := &imageMeta{ + layer: l, + children: make(map[ID]struct{}), + } + + is.images[IDFromDigest(dgst)] = imageMeta + + return nil + }) + if err != nil { + return err + } + + // Second pass to fill in children maps + for id := range is.images { + if parent, err := is.GetParent(id); err == nil { + if parentMeta := is.images[parent]; parentMeta != nil { + parentMeta.children[id] = struct{}{} + } + } + } + + return nil +} + +func (is *store) Create(config []byte) (ID, error) { + var img Image + err := json.Unmarshal(config, &img) + if err != nil { + return "", err + } + + // Must reject any config that references diffIDs from the history + // which aren't among the rootfs layers. 
+ rootFSLayers := make(map[layer.DiffID]struct{}) + for _, diffID := range img.RootFS.DiffIDs { + rootFSLayers[diffID] = struct{}{} + } + + layerCounter := 0 + for _, h := range img.History { + if !h.EmptyLayer { + layerCounter++ + } + } + if layerCounter > len(img.RootFS.DiffIDs) { + return "", errors.New("too many non-empty layers in History section") + } + + dgst, err := is.fs.Set(config) + if err != nil { + return "", err + } + imageID := IDFromDigest(dgst) + + is.Lock() + defer is.Unlock() + + if _, exists := is.images[imageID]; exists { + return imageID, nil + } + + layerID := img.RootFS.ChainID() + + var l layer.Layer + if layerID != "" { + if !system.IsOSSupported(img.OperatingSystem()) { + return "", system.ErrNotSupportedOperatingSystem + } + l, err = is.lss[img.OperatingSystem()].Get(layerID) + if err != nil { + return "", errors.Wrapf(err, "failed to get layer %s", layerID) + } + } + + imageMeta := &imageMeta{ + layer: l, + children: make(map[ID]struct{}), + } + + is.images[imageID] = imageMeta + if err := is.digestSet.Add(imageID.Digest()); err != nil { + delete(is.images, imageID) + return "", err + } + + return imageID, nil +} + +type imageNotFoundError string + +func (e imageNotFoundError) Error() string { + return "No such image: " + string(e) +} + +func (imageNotFoundError) NotFound() {} + +func (is *store) Search(term string) (ID, error) { + dgst, err := is.digestSet.Lookup(term) + if err != nil { + if err == digestset.ErrDigestNotFound { + err = imageNotFoundError(term) + } + return "", errors.WithStack(err) + } + return IDFromDigest(dgst), nil +} + +func (is *store) Get(id ID) (*Image, error) { + // todo: Check if image is in images + // todo: Detect manual insertions and start using them + config, err := is.fs.Get(id.Digest()) + if err != nil { + return nil, err + } + + img, err := NewFromJSON(config) + if err != nil { + return nil, err + } + img.computedID = id + + img.Parent, err = is.GetParent(id) + if err != nil { + img.Parent = "" + } + + return img, nil +} + +func (is *store) Delete(id ID) ([]layer.Metadata, error) { + is.Lock() + defer is.Unlock() + + imageMeta := is.images[id] + if imageMeta == nil { + return nil, fmt.Errorf("unrecognized image ID %s", id.String()) + } + img, err := is.Get(id) + if err != nil { + return nil, fmt.Errorf("unrecognized image %s, %v", id.String(), err) + } + if !system.IsOSSupported(img.OperatingSystem()) { + return nil, fmt.Errorf("unsupported image operating system %q", img.OperatingSystem()) + } + for id := range imageMeta.children { + is.fs.DeleteMetadata(id.Digest(), "parent") + } + if parent, err := is.GetParent(id); err == nil && is.images[parent] != nil { + delete(is.images[parent].children, id) + } + + if err := is.digestSet.Remove(id.Digest()); err != nil { + logrus.Errorf("error removing %s from digest set: %q", id, err) + } + delete(is.images, id) + is.fs.Delete(id.Digest()) + + if imageMeta.layer != nil { + return is.lss[img.OperatingSystem()].Release(imageMeta.layer) + } + return nil, nil +} + +func (is *store) SetParent(id, parent ID) error { + is.Lock() + defer is.Unlock() + parentMeta := is.images[parent] + if parentMeta == nil { + return fmt.Errorf("unknown parent image ID %s", parent.String()) + } + if parent, err := is.GetParent(id); err == nil && is.images[parent] != nil { + delete(is.images[parent].children, id) + } + parentMeta.children[id] = struct{}{} + return is.fs.SetMetadata(id.Digest(), "parent", []byte(parent)) +} + +func (is *store) GetParent(id ID) (ID, error) { + d, err := 
is.fs.GetMetadata(id.Digest(), "parent") + if err != nil { + return "", err + } + return ID(d), nil // todo: validate? +} + +// SetLastUpdated time for the image ID to the current time +func (is *store) SetLastUpdated(id ID) error { + lastUpdated := []byte(time.Now().Format(time.RFC3339Nano)) + return is.fs.SetMetadata(id.Digest(), "lastUpdated", lastUpdated) +} + +// GetLastUpdated time for the image ID +func (is *store) GetLastUpdated(id ID) (time.Time, error) { + bytes, err := is.fs.GetMetadata(id.Digest(), "lastUpdated") + if err != nil || len(bytes) == 0 { + // No lastUpdated time + return time.Time{}, nil + } + return time.Parse(time.RFC3339Nano, string(bytes)) +} + +func (is *store) Children(id ID) []ID { + is.RLock() + defer is.RUnlock() + + return is.children(id) +} + +func (is *store) children(id ID) []ID { + var ids []ID + if is.images[id] != nil { + for id := range is.images[id].children { + ids = append(ids, id) + } + } + return ids +} + +func (is *store) Heads() map[ID]*Image { + return is.imagesMap(false) +} + +func (is *store) Map() map[ID]*Image { + return is.imagesMap(true) +} + +func (is *store) imagesMap(all bool) map[ID]*Image { + is.RLock() + defer is.RUnlock() + + images := make(map[ID]*Image) + + for id := range is.images { + if !all && len(is.children(id)) > 0 { + continue + } + img, err := is.Get(id) + if err != nil { + logrus.Errorf("invalid image access: %q, error: %q", id, err) + continue + } + images[id] = img + } + return images +} + +func (is *store) Len() int { + is.RLock() + defer is.RUnlock() + return len(is.images) +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/image/v1/imagev1.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/image/v1/imagev1.go new file mode 100644 index 000000000..c341ceaa7 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/image/v1/imagev1.go @@ -0,0 +1,150 @@ +package v1 // import "github.com/docker/docker/image/v1" + +import ( + "encoding/json" + "reflect" + "strings" + + "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/stringid" + "github.com/opencontainers/go-digest" + "github.com/sirupsen/logrus" +) + +// noFallbackMinVersion is the minimum version for which v1compatibility +// information will not be marshaled through the Image struct to remove +// blank fields. +var noFallbackMinVersion = "1.8.3" + +// HistoryFromConfig creates a History struct from v1 configuration JSON +func HistoryFromConfig(imageJSON []byte, emptyLayer bool) (image.History, error) { + h := image.History{} + var v1Image image.V1Image + if err := json.Unmarshal(imageJSON, &v1Image); err != nil { + return h, err + } + + return image.History{ + Author: v1Image.Author, + Created: v1Image.Created, + CreatedBy: strings.Join(v1Image.ContainerConfig.Cmd, " "), + Comment: v1Image.Comment, + EmptyLayer: emptyLayer, + }, nil +} + +// CreateID creates an ID from v1 image, layerID and parent ID. +// Used for backwards compatibility with old clients. 
+func CreateID(v1Image image.V1Image, layerID layer.ChainID, parent digest.Digest) (digest.Digest, error) { + v1Image.ID = "" + v1JSON, err := json.Marshal(v1Image) + if err != nil { + return "", err + } + + var config map[string]*json.RawMessage + if err := json.Unmarshal(v1JSON, &config); err != nil { + return "", err + } + + // FIXME: note that this is slightly incompatible with RootFS logic + config["layer_id"] = rawJSON(layerID) + if parent != "" { + config["parent"] = rawJSON(parent) + } + + configJSON, err := json.Marshal(config) + if err != nil { + return "", err + } + logrus.Debugf("CreateV1ID %s", configJSON) + + return digest.FromBytes(configJSON), nil +} + +// MakeConfigFromV1Config creates an image config from the legacy V1 config format. +func MakeConfigFromV1Config(imageJSON []byte, rootfs *image.RootFS, history []image.History) ([]byte, error) { + var dver struct { + DockerVersion string `json:"docker_version"` + } + + if err := json.Unmarshal(imageJSON, &dver); err != nil { + return nil, err + } + + useFallback := versions.LessThan(dver.DockerVersion, noFallbackMinVersion) + + if useFallback { + var v1Image image.V1Image + err := json.Unmarshal(imageJSON, &v1Image) + if err != nil { + return nil, err + } + imageJSON, err = json.Marshal(v1Image) + if err != nil { + return nil, err + } + } + + var c map[string]*json.RawMessage + if err := json.Unmarshal(imageJSON, &c); err != nil { + return nil, err + } + + delete(c, "id") + delete(c, "parent") + delete(c, "Size") // Size is calculated from data on disk and is inconsistent + delete(c, "parent_id") + delete(c, "layer_id") + delete(c, "throwaway") + + c["rootfs"] = rawJSON(rootfs) + c["history"] = rawJSON(history) + + return json.Marshal(c) +} + +// MakeV1ConfigFromConfig creates a legacy V1 image config from an Image struct +func MakeV1ConfigFromConfig(img *image.Image, v1ID, parentV1ID string, throwaway bool) ([]byte, error) { + // Top-level v1compatibility string should be a modified version of the + // image config. + var configAsMap map[string]*json.RawMessage + if err := json.Unmarshal(img.RawJSON(), &configAsMap); err != nil { + return nil, err + } + + // Delete fields that didn't exist in old manifest + imageType := reflect.TypeOf(img).Elem() + for i := 0; i < imageType.NumField(); i++ { + f := imageType.Field(i) + jsonName := strings.Split(f.Tag.Get("json"), ",")[0] + // Parent is handled specially below. + if jsonName != "" && jsonName != "parent" { + delete(configAsMap, jsonName) + } + } + configAsMap["id"] = rawJSON(v1ID) + if parentV1ID != "" { + configAsMap["parent"] = rawJSON(parentV1ID) + } + if throwaway { + configAsMap["throwaway"] = rawJSON(true) + } + + return json.Marshal(configAsMap) +} + +func rawJSON(value interface{}) *json.RawMessage { + jsonval, err := json.Marshal(value) + if err != nil { + return nil + } + return (*json.RawMessage)(&jsonval) +} + +// ValidateID checks whether an ID string is a valid image ID. 
+func ValidateID(id string) error { + return stringid.ValidateID(id) +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/layer/empty.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/layer/empty.go new file mode 100644 index 000000000..c81c70214 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/layer/empty.go @@ -0,0 +1,61 @@ +package layer // import "github.com/docker/docker/layer" + +import ( + "archive/tar" + "bytes" + "fmt" + "io" + "io/ioutil" +) + +// DigestSHA256EmptyTar is the canonical sha256 digest of empty tar file - +// (1024 NULL bytes) +const DigestSHA256EmptyTar = DiffID("sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef") + +type emptyLayer struct{} + +// EmptyLayer is a layer that corresponds to empty tar. +var EmptyLayer = &emptyLayer{} + +func (el *emptyLayer) TarStream() (io.ReadCloser, error) { + buf := new(bytes.Buffer) + tarWriter := tar.NewWriter(buf) + tarWriter.Close() + return ioutil.NopCloser(buf), nil +} + +func (el *emptyLayer) TarStreamFrom(p ChainID) (io.ReadCloser, error) { + if p == "" { + return el.TarStream() + } + return nil, fmt.Errorf("can't get parent tar stream of an empty layer") +} + +func (el *emptyLayer) ChainID() ChainID { + return ChainID(DigestSHA256EmptyTar) +} + +func (el *emptyLayer) DiffID() DiffID { + return DigestSHA256EmptyTar +} + +func (el *emptyLayer) Parent() Layer { + return nil +} + +func (el *emptyLayer) Size() (size int64, err error) { + return 0, nil +} + +func (el *emptyLayer) DiffSize() (size int64, err error) { + return 0, nil +} + +func (el *emptyLayer) Metadata() (map[string]string, error) { + return make(map[string]string), nil +} + +// IsEmpty returns true if the layer is an EmptyLayer +func IsEmpty(diffID DiffID) bool { + return diffID == DigestSHA256EmptyTar +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/layer/filestore.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/layer/filestore.go new file mode 100644 index 000000000..208a0c3a8 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/layer/filestore.go @@ -0,0 +1,355 @@ +package layer // import "github.com/docker/docker/layer" + +import ( + "compress/gzip" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "regexp" + "strconv" + "strings" + + "github.com/docker/distribution" + "github.com/docker/docker/pkg/ioutils" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +var ( + stringIDRegexp = regexp.MustCompile(`^[a-f0-9]{64}(-init)?$`) + supportedAlgorithms = []digest.Algorithm{ + digest.SHA256, + // digest.SHA384, // Currently not used + // digest.SHA512, // Currently not used + } +) + +type fileMetadataStore struct { + root string +} + +type fileMetadataTransaction struct { + store *fileMetadataStore + ws *ioutils.AtomicWriteSet +} + +// newFSMetadataStore returns an instance of a metadata store +// which is backed by files on disk using the provided root +// as the root of metadata files. 
+func newFSMetadataStore(root string) (*fileMetadataStore, error) { + if err := os.MkdirAll(root, 0700); err != nil { + return nil, err + } + return &fileMetadataStore{ + root: root, + }, nil +} + +func (fms *fileMetadataStore) getLayerDirectory(layer ChainID) string { + dgst := digest.Digest(layer) + return filepath.Join(fms.root, string(dgst.Algorithm()), dgst.Hex()) +} + +func (fms *fileMetadataStore) getLayerFilename(layer ChainID, filename string) string { + return filepath.Join(fms.getLayerDirectory(layer), filename) +} + +func (fms *fileMetadataStore) getMountDirectory(mount string) string { + return filepath.Join(fms.root, "mounts", mount) +} + +func (fms *fileMetadataStore) getMountFilename(mount, filename string) string { + return filepath.Join(fms.getMountDirectory(mount), filename) +} + +func (fms *fileMetadataStore) StartTransaction() (*fileMetadataTransaction, error) { + tmpDir := filepath.Join(fms.root, "tmp") + if err := os.MkdirAll(tmpDir, 0755); err != nil { + return nil, err + } + ws, err := ioutils.NewAtomicWriteSet(tmpDir) + if err != nil { + return nil, err + } + + return &fileMetadataTransaction{ + store: fms, + ws: ws, + }, nil +} + +func (fm *fileMetadataTransaction) SetSize(size int64) error { + content := fmt.Sprintf("%d", size) + return fm.ws.WriteFile("size", []byte(content), 0644) +} + +func (fm *fileMetadataTransaction) SetParent(parent ChainID) error { + return fm.ws.WriteFile("parent", []byte(digest.Digest(parent).String()), 0644) +} + +func (fm *fileMetadataTransaction) SetDiffID(diff DiffID) error { + return fm.ws.WriteFile("diff", []byte(digest.Digest(diff).String()), 0644) +} + +func (fm *fileMetadataTransaction) SetCacheID(cacheID string) error { + return fm.ws.WriteFile("cache-id", []byte(cacheID), 0644) +} + +func (fm *fileMetadataTransaction) SetDescriptor(ref distribution.Descriptor) error { + jsonRef, err := json.Marshal(ref) + if err != nil { + return err + } + return fm.ws.WriteFile("descriptor.json", jsonRef, 0644) +} + +func (fm *fileMetadataTransaction) TarSplitWriter(compressInput bool) (io.WriteCloser, error) { + f, err := fm.ws.FileWriter("tar-split.json.gz", os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + return nil, err + } + var wc io.WriteCloser + if compressInput { + wc = gzip.NewWriter(f) + } else { + wc = f + } + + return ioutils.NewWriteCloserWrapper(wc, func() error { + wc.Close() + return f.Close() + }), nil +} + +func (fm *fileMetadataTransaction) Commit(layer ChainID) error { + finalDir := fm.store.getLayerDirectory(layer) + if err := os.MkdirAll(filepath.Dir(finalDir), 0755); err != nil { + return err + } + + return fm.ws.Commit(finalDir) +} + +func (fm *fileMetadataTransaction) Cancel() error { + return fm.ws.Cancel() +} + +func (fm *fileMetadataTransaction) String() string { + return fm.ws.String() +} + +func (fms *fileMetadataStore) GetSize(layer ChainID) (int64, error) { + content, err := ioutil.ReadFile(fms.getLayerFilename(layer, "size")) + if err != nil { + return 0, err + } + + size, err := strconv.ParseInt(string(content), 10, 64) + if err != nil { + return 0, err + } + + return size, nil +} + +func (fms *fileMetadataStore) GetParent(layer ChainID) (ChainID, error) { + content, err := ioutil.ReadFile(fms.getLayerFilename(layer, "parent")) + if err != nil { + if os.IsNotExist(err) { + return "", nil + } + return "", err + } + + dgst, err := digest.Parse(strings.TrimSpace(string(content))) + if err != nil { + return "", err + } + + return ChainID(dgst), nil +} + +func (fms *fileMetadataStore) 
GetDiffID(layer ChainID) (DiffID, error) { + content, err := ioutil.ReadFile(fms.getLayerFilename(layer, "diff")) + if err != nil { + return "", err + } + + dgst, err := digest.Parse(strings.TrimSpace(string(content))) + if err != nil { + return "", err + } + + return DiffID(dgst), nil +} + +func (fms *fileMetadataStore) GetCacheID(layer ChainID) (string, error) { + contentBytes, err := ioutil.ReadFile(fms.getLayerFilename(layer, "cache-id")) + if err != nil { + return "", err + } + content := strings.TrimSpace(string(contentBytes)) + + if content == "" { + return "", errors.Errorf("invalid cache id value") + } + + return content, nil +} + +func (fms *fileMetadataStore) GetDescriptor(layer ChainID) (distribution.Descriptor, error) { + content, err := ioutil.ReadFile(fms.getLayerFilename(layer, "descriptor.json")) + if err != nil { + if os.IsNotExist(err) { + // only return empty descriptor to represent what is stored + return distribution.Descriptor{}, nil + } + return distribution.Descriptor{}, err + } + + var ref distribution.Descriptor + err = json.Unmarshal(content, &ref) + if err != nil { + return distribution.Descriptor{}, err + } + return ref, err +} + +func (fms *fileMetadataStore) TarSplitReader(layer ChainID) (io.ReadCloser, error) { + fz, err := os.Open(fms.getLayerFilename(layer, "tar-split.json.gz")) + if err != nil { + return nil, err + } + f, err := gzip.NewReader(fz) + if err != nil { + fz.Close() + return nil, err + } + + return ioutils.NewReadCloserWrapper(f, func() error { + f.Close() + return fz.Close() + }), nil +} + +func (fms *fileMetadataStore) SetMountID(mount string, mountID string) error { + if err := os.MkdirAll(fms.getMountDirectory(mount), 0755); err != nil { + return err + } + return ioutil.WriteFile(fms.getMountFilename(mount, "mount-id"), []byte(mountID), 0644) +} + +func (fms *fileMetadataStore) SetInitID(mount string, init string) error { + if err := os.MkdirAll(fms.getMountDirectory(mount), 0755); err != nil { + return err + } + return ioutil.WriteFile(fms.getMountFilename(mount, "init-id"), []byte(init), 0644) +} + +func (fms *fileMetadataStore) SetMountParent(mount string, parent ChainID) error { + if err := os.MkdirAll(fms.getMountDirectory(mount), 0755); err != nil { + return err + } + return ioutil.WriteFile(fms.getMountFilename(mount, "parent"), []byte(digest.Digest(parent).String()), 0644) +} + +func (fms *fileMetadataStore) GetMountID(mount string) (string, error) { + contentBytes, err := ioutil.ReadFile(fms.getMountFilename(mount, "mount-id")) + if err != nil { + return "", err + } + content := strings.TrimSpace(string(contentBytes)) + + if !stringIDRegexp.MatchString(content) { + return "", errors.New("invalid mount id value") + } + + return content, nil +} + +func (fms *fileMetadataStore) GetInitID(mount string) (string, error) { + contentBytes, err := ioutil.ReadFile(fms.getMountFilename(mount, "init-id")) + if err != nil { + if os.IsNotExist(err) { + return "", nil + } + return "", err + } + content := strings.TrimSpace(string(contentBytes)) + + if !stringIDRegexp.MatchString(content) { + return "", errors.New("invalid init id value") + } + + return content, nil +} + +func (fms *fileMetadataStore) GetMountParent(mount string) (ChainID, error) { + content, err := ioutil.ReadFile(fms.getMountFilename(mount, "parent")) + if err != nil { + if os.IsNotExist(err) { + return "", nil + } + return "", err + } + + dgst, err := digest.Parse(strings.TrimSpace(string(content))) + if err != nil { + return "", err + } + + return ChainID(dgst), nil +} + 
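The fileMetadataStore above persists each layer's bookkeeping as small plain-text files in a per-digest directory, with per-mount state under a "mounts" subtree. The standalone Go sketch below (not part of the vendored patch) reads such a layout directly; the layerdb root path, the example ChainID, and the mount name are illustrative assumptions, not values taken from this code.

package main

import (
	"fmt"
	"io/ioutil"
	"path/filepath"
	"strings"
)

func main() {
	// Assumption: the Docker daemon conventionally keeps this store at
	// /var/lib/docker/image/<graphdriver>/layerdb; adjust for your setup.
	root := "/var/lib/docker/image/overlay2/layerdb"

	// A ChainID "sha256:<hex>" maps to <root>/sha256/<hex>/, mirroring
	// getLayerDirectory above. This example digest is hypothetical.
	chainID := "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef"
	layerDir := filepath.Join(root, "sha256", strings.TrimPrefix(chainID, "sha256:"))

	// Each getter/setter pair above corresponds to one file in that directory.
	for _, name := range []string{"size", "parent", "diff", "cache-id", "descriptor.json"} {
		if b, err := ioutil.ReadFile(filepath.Join(layerDir, name)); err == nil {
			fmt.Printf("%s: %s\n", name, strings.TrimSpace(string(b)))
		}
	}

	// Mount state lives under <root>/mounts/<name>/, as in getMountDirectory.
	mountDir := filepath.Join(root, "mounts", "example-container") // hypothetical mount name
	for _, name := range []string{"mount-id", "init-id", "parent"} {
		if b, err := ioutil.ReadFile(filepath.Join(mountDir, name)); err == nil {
			fmt.Printf("mounts/%s: %s\n", name, strings.TrimSpace(string(b)))
		}
	}
}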
+func (fms *fileMetadataStore) List() ([]ChainID, []string, error) {
+	var ids []ChainID
+	for _, algorithm := range supportedAlgorithms {
+		fileInfos, err := ioutil.ReadDir(filepath.Join(fms.root, string(algorithm)))
+		if err != nil {
+			if os.IsNotExist(err) {
+				continue
+			}
+			return nil, nil, err
+		}
+
+		for _, fi := range fileInfos {
+			if fi.IsDir() && fi.Name() != "mounts" {
+				dgst := digest.NewDigestFromHex(string(algorithm), fi.Name())
+				if err := dgst.Validate(); err != nil {
+					logrus.Debugf("Ignoring invalid digest %s:%s", algorithm, fi.Name())
+				} else {
+					ids = append(ids, ChainID(dgst))
+				}
+			}
+		}
+	}
+
+	fileInfos, err := ioutil.ReadDir(filepath.Join(fms.root, "mounts"))
+	if err != nil {
+		if os.IsNotExist(err) {
+			return ids, []string{}, nil
+		}
+		return nil, nil, err
+	}
+
+	var mounts []string
+	for _, fi := range fileInfos {
+		if fi.IsDir() {
+			mounts = append(mounts, fi.Name())
+		}
+	}
+
+	return ids, mounts, nil
+}
+
+func (fms *fileMetadataStore) Remove(layer ChainID) error {
+	return os.RemoveAll(fms.getLayerDirectory(layer))
+}
+
+func (fms *fileMetadataStore) RemoveMount(mount string) error {
+	return os.RemoveAll(fms.getMountDirectory(mount))
+}
diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/layer/filestore_unix.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/layer/filestore_unix.go
new file mode 100644
index 000000000..68e7f9077
--- /dev/null
+++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/layer/filestore_unix.go
@@ -0,0 +1,15 @@
+// +build !windows
+
+package layer // import "github.com/docker/docker/layer"
+
+import "runtime"
+
+// setOS writes the "os" file to the layer filestore
+func (fm *fileMetadataTransaction) setOS(os string) error {
+	return nil
+}
+
+// getOS reads the "os" file from the layer filestore
+func (fms *fileMetadataStore) getOS(layer ChainID) (string, error) {
+	return runtime.GOOS, nil
+}
diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/layer/filestore_windows.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/layer/filestore_windows.go
new file mode 100644
index 000000000..cecad426c
--- /dev/null
+++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/layer/filestore_windows.go
@@ -0,0 +1,35 @@
+package layer // import "github.com/docker/docker/layer"
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+	"strings"
+)
+
+// setOS writes the "os" file to the layer filestore
+func (fm *fileMetadataTransaction) setOS(os string) error {
+	if os == "" {
+		return nil
+	}
+	return fm.ws.WriteFile("os", []byte(os), 0644)
+}
+
+// getOS reads the "os" file from the layer filestore
+func (fms *fileMetadataStore) getOS(layer ChainID) (string, error) {
+	contentBytes, err := ioutil.ReadFile(fms.getLayerFilename(layer, "os"))
+	if err != nil {
+		// For backwards compatibility, the os file may not exist. Default to "windows" if missing.
+		if os.IsNotExist(err) {
+			return "windows", nil
+		}
+		return "", err
+	}
+	content := strings.TrimSpace(string(contentBytes))
+
+	if content != "windows" && content != "linux" {
+		return "", fmt.Errorf("invalid operating system value: %s", content)
+	}
+
+	return content, nil
+}
diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/layer/layer.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/layer/layer.go
new file mode 100644
index 000000000..d0c7fa860
--- /dev/null
+++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/layer/layer.go
@@ -0,0 +1,237 @@
+// Package layer is a package for managing read-only
+// and read-write mounts on the union file system
+// driver. Read-only mounts are referenced using a
+// content hash and are protected from mutation in
+// the exposed interface. The tar format is used
+// to create read-only layers and export both
+// read-only and writable layers. The exported
+// tar data for a read-only layer should match
+// the tar used to create the layer.
+package layer // import "github.com/docker/docker/layer"
+
+import (
+	"errors"
+	"io"
+
+	"github.com/docker/distribution"
+	"github.com/docker/docker/pkg/archive"
+	"github.com/docker/docker/pkg/containerfs"
+	"github.com/opencontainers/go-digest"
+	"github.com/sirupsen/logrus"
+)
+
+var (
+	// ErrLayerDoesNotExist is used when an operation is
+	// attempted on a layer which does not exist.
+	ErrLayerDoesNotExist = errors.New("layer does not exist")
+
+	// ErrLayerNotRetained is used when a release is
+	// attempted on a layer which is not retained.
+	ErrLayerNotRetained = errors.New("layer not retained")
+
+	// ErrMountDoesNotExist is used when an operation is
+	// attempted on a mount layer which does not exist.
+	ErrMountDoesNotExist = errors.New("mount does not exist")
+
+	// ErrMountNameConflict is used when a mount is attempted
+	// to be created but there is already a mount with the name
+	// used for creation.
+	ErrMountNameConflict = errors.New("mount already exists with name")
+
+	// ErrActiveMount is used when an operation on a
+	// mount is attempted but the layer is still
+	// mounted and the operation cannot be performed.
+	ErrActiveMount = errors.New("mount still active")
+
+	// ErrNotMounted is used when requesting an active
+	// mount but the layer is not mounted.
+	ErrNotMounted = errors.New("not mounted")
+
+	// ErrMaxDepthExceeded is used when a layer is attempted
+	// to be created which would result in a layer depth
+	// greater than the 125 max.
+	ErrMaxDepthExceeded = errors.New("max depth exceeded")
+
+	// ErrNotSupported is used when the action is not supported
+	// on the current host operating system.
+	ErrNotSupported = errors.New("not supported on this host operating system")
+)
+
+// ChainID is the content-addressable ID of a layer.
+type ChainID digest.Digest
+
+// String returns a string rendition of a layer ID
+func (id ChainID) String() string {
+	return string(id)
+}
+
+// DiffID is the hash of an individual layer tar.
+type DiffID digest.Digest
+
+// String returns a string rendition of a layer DiffID
+func (diffID DiffID) String() string {
+	return string(diffID)
+}
+
+// TarStreamer represents an object which may
+// have its contents exported as a tar stream.
+type TarStreamer interface {
+	// TarStream returns a tar archive stream
+	// for the contents of a layer.
+	TarStream() (io.ReadCloser, error)
+}
+
+// Layer represents a read-only layer
+type Layer interface {
+	TarStreamer
+
+	// TarStreamFrom returns a tar archive stream for all the layer chain with
+	// arbitrary depth.
+	TarStreamFrom(ChainID) (io.ReadCloser, error)
+
+	// ChainID returns the content hash of the entire layer chain. The hash
+	// chain is made up of DiffID of top layer and all of its parents.
+	ChainID() ChainID
+
+	// DiffID returns the content hash of the layer
+	// tar stream used to create this layer.
+	DiffID() DiffID
+
+	// Parent returns the next layer in the layer chain.
+	Parent() Layer
+
+	// Size returns the size of the entire layer chain. The size
+	// is calculated from the total size of all files in the layers.
+	Size() (int64, error)
+
+	// DiffSize returns the size difference of the top layer
+	// from parent layer.
+	DiffSize() (int64, error)
+
+	// Metadata returns the low level storage metadata associated
+	// with layer.
+	Metadata() (map[string]string, error)
+}
+
+// RWLayer represents a layer which is
+// read and writable
+type RWLayer interface {
+	TarStreamer
+
+	// Name of mounted layer
+	Name() string
+
+	// Parent returns the layer which the writable
+	// layer was created from.
+	Parent() Layer
+
+	// Mount mounts the RWLayer and returns the filesystem path
+	// to the writable layer.
+	Mount(mountLabel string) (containerfs.ContainerFS, error)
+
+	// Unmount unmounts the RWLayer. This should be called
+	// for every mount. If there are multiple mount calls
+	// this operation will only decrement the internal mount counter.
+	Unmount() error
+
+	// Size represents the size of the writable layer
+	// as calculated by the total size of the files
+	// changed in the mutable layer.
+	Size() (int64, error)
+
+	// Changes returns the set of changes for the mutable layer
+	// from the base layer.
+	Changes() ([]archive.Change, error)
+
+	// Metadata returns the low level metadata for the mutable layer
+	Metadata() (map[string]string, error)
+}
+
+// Metadata holds information about a
+// read-only layer
+type Metadata struct {
+	// ChainID is the content hash of the layer
+	ChainID ChainID
+
+	// DiffID is the hash of the tar data used to
+	// create the layer
+	DiffID DiffID
+
+	// Size is the size of the layer and all parents
+	Size int64
+
+	// DiffSize is the size of the top layer
+	DiffSize int64
+}
+
+// MountInit is a function to initialize a
+// writable mount. Changes made here will
+// not be included in the Tar stream of the
+// RWLayer.
+type MountInit func(root containerfs.ContainerFS) error
+
+// CreateRWLayerOpts contains optional arguments to be passed to CreateRWLayer
+type CreateRWLayerOpts struct {
+	MountLabel string
+	InitFunc   MountInit
+	StorageOpt map[string]string
+}
+
+// Store represents a backend for managing both
+// read-only and read-write layers.
+type Store interface {
+	Register(io.Reader, ChainID) (Layer, error)
+	Get(ChainID) (Layer, error)
+	Map() map[ChainID]Layer
+	Release(Layer) ([]Metadata, error)
+
+	CreateRWLayer(id string, parent ChainID, opts *CreateRWLayerOpts) (RWLayer, error)
+	GetRWLayer(id string) (RWLayer, error)
+	GetMountID(id string) (string, error)
+	ReleaseRWLayer(RWLayer) ([]Metadata, error)
+
+	Cleanup() error
+	DriverStatus() [][2]string
+	DriverName() string
+}
+
+// DescribableStore represents a layer store capable of storing
+// descriptors for layers.
+type DescribableStore interface {
+	RegisterWithDescriptor(io.Reader, ChainID, distribution.Descriptor) (Layer, error)
+}
+
+// CreateChainID returns ID for a layerDigest slice
+func CreateChainID(dgsts []DiffID) ChainID {
+	return createChainIDFromParent("", dgsts...)
+}
+
+func createChainIDFromParent(parent ChainID, dgsts ...DiffID) ChainID {
+	if len(dgsts) == 0 {
+		return parent
+	}
+	if parent == "" {
+		return createChainIDFromParent(ChainID(dgsts[0]), dgsts[1:]...)
+	}
+	// H = "H(n-1) SHA256(n)"
+	dgst := digest.FromBytes([]byte(string(parent) + " " + string(dgsts[0])))
+	return createChainIDFromParent(ChainID(dgst), dgsts[1:]...)
+} + +// ReleaseAndLog releases the provided layer from the given layer +// store, logging any error and release metadata +func ReleaseAndLog(ls Store, l Layer) { + metadata, err := ls.Release(l) + if err != nil { + logrus.Errorf("Error releasing layer %s: %v", l.ChainID(), err) + } + LogReleaseMetadata(metadata) +} + +// LogReleaseMetadata logs a metadata array, uses this to +// ensure consistent logging for release metadata +func LogReleaseMetadata(metadatas []Metadata) { + for _, metadata := range metadatas { + logrus.Infof("Layer %s cleaned up", metadata.ChainID) + } +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/layer/layer_store.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/layer/layer_store.go new file mode 100644 index 000000000..81730e9d9 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/layer/layer_store.go @@ -0,0 +1,777 @@ +package layer // import "github.com/docker/docker/layer" + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "sync" + + "github.com/docker/distribution" + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/locker" + "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/system" + "github.com/opencontainers/go-digest" + "github.com/sirupsen/logrus" + "github.com/vbatts/tar-split/tar/asm" + "github.com/vbatts/tar-split/tar/storage" +) + +// maxLayerDepth represents the maximum number of +// layers which can be chained together. 125 was +// chosen to account for the 127 max in some +// graphdrivers plus the 2 additional layers +// used to create a rwlayer. +const maxLayerDepth = 125 + +type layerStore struct { + store *fileMetadataStore + driver graphdriver.Driver + useTarSplit bool + + layerMap map[ChainID]*roLayer + layerL sync.Mutex + + mounts map[string]*mountedLayer + mountL sync.Mutex + + // protect *RWLayer() methods from operating on the same name/id + locker *locker.Locker + + os string +} + +// StoreOptions are the options used to create a new Store instance +type StoreOptions struct { + Root string + MetadataStorePathTemplate string + GraphDriver string + GraphDriverOptions []string + IDMapping *idtools.IdentityMapping + PluginGetter plugingetter.PluginGetter + ExperimentalEnabled bool + OS string +} + +// NewStoreFromOptions creates a new Store instance +func NewStoreFromOptions(options StoreOptions) (Store, error) { + driver, err := graphdriver.New(options.GraphDriver, options.PluginGetter, graphdriver.Options{ + Root: options.Root, + DriverOptions: options.GraphDriverOptions, + UIDMaps: options.IDMapping.UIDs(), + GIDMaps: options.IDMapping.GIDs(), + ExperimentalEnabled: options.ExperimentalEnabled, + }) + if err != nil { + return nil, fmt.Errorf("error initializing graphdriver: %v", err) + } + logrus.Debugf("Initialized graph driver %s", driver) + + root := fmt.Sprintf(options.MetadataStorePathTemplate, driver) + + return newStoreFromGraphDriver(root, driver, options.OS) +} + +// newStoreFromGraphDriver creates a new Store instance using the provided +// metadata store and graph driver. The metadata store will be used to restore +// the Store. 
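+// Layers and mounts that fail to load are logged at debug level and
+// skipped; a corrupt entry does not prevent the store from opening.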
+func newStoreFromGraphDriver(root string, driver graphdriver.Driver, os string) (Store, error) { + if !system.IsOSSupported(os) { + return nil, fmt.Errorf("failed to initialize layer store as operating system '%s' is not supported", os) + } + caps := graphdriver.Capabilities{} + if capDriver, ok := driver.(graphdriver.CapabilityDriver); ok { + caps = capDriver.Capabilities() + } + + ms, err := newFSMetadataStore(root) + if err != nil { + return nil, err + } + + ls := &layerStore{ + store: ms, + driver: driver, + layerMap: map[ChainID]*roLayer{}, + mounts: map[string]*mountedLayer{}, + locker: locker.New(), + useTarSplit: !caps.ReproducesExactDiffs, + os: os, + } + + ids, mounts, err := ms.List() + if err != nil { + return nil, err + } + + for _, id := range ids { + l, err := ls.loadLayer(id) + if err != nil { + logrus.Debugf("Failed to load layer %s: %s", id, err) + continue + } + if l.parent != nil { + l.parent.referenceCount++ + } + } + + for _, mount := range mounts { + if err := ls.loadMount(mount); err != nil { + logrus.Debugf("Failed to load mount %s: %s", mount, err) + } + } + + return ls, nil +} + +func (ls *layerStore) Driver() graphdriver.Driver { + return ls.driver +} + +func (ls *layerStore) loadLayer(layer ChainID) (*roLayer, error) { + cl, ok := ls.layerMap[layer] + if ok { + return cl, nil + } + + diff, err := ls.store.GetDiffID(layer) + if err != nil { + return nil, fmt.Errorf("failed to get diff id for %s: %s", layer, err) + } + + size, err := ls.store.GetSize(layer) + if err != nil { + return nil, fmt.Errorf("failed to get size for %s: %s", layer, err) + } + + cacheID, err := ls.store.GetCacheID(layer) + if err != nil { + return nil, fmt.Errorf("failed to get cache id for %s: %s", layer, err) + } + + parent, err := ls.store.GetParent(layer) + if err != nil { + return nil, fmt.Errorf("failed to get parent for %s: %s", layer, err) + } + + descriptor, err := ls.store.GetDescriptor(layer) + if err != nil { + return nil, fmt.Errorf("failed to get descriptor for %s: %s", layer, err) + } + + os, err := ls.store.getOS(layer) + if err != nil { + return nil, fmt.Errorf("failed to get operating system for %s: %s", layer, err) + } + + if os != ls.os { + return nil, fmt.Errorf("failed to load layer with os %s into layerstore for %s", os, ls.os) + } + + cl = &roLayer{ + chainID: layer, + diffID: diff, + size: size, + cacheID: cacheID, + layerStore: ls, + references: map[Layer]struct{}{}, + descriptor: descriptor, + } + + if parent != "" { + p, err := ls.loadLayer(parent) + if err != nil { + return nil, err + } + cl.parent = p + } + + ls.layerMap[cl.chainID] = cl + + return cl, nil +} + +func (ls *layerStore) loadMount(mount string) error { + ls.mountL.Lock() + defer ls.mountL.Unlock() + if _, ok := ls.mounts[mount]; ok { + return nil + } + + mountID, err := ls.store.GetMountID(mount) + if err != nil { + return err + } + + initID, err := ls.store.GetInitID(mount) + if err != nil { + return err + } + + parent, err := ls.store.GetMountParent(mount) + if err != nil { + return err + } + + ml := &mountedLayer{ + name: mount, + mountID: mountID, + initID: initID, + layerStore: ls, + references: map[RWLayer]*referencedRWLayer{}, + } + + if parent != "" { + p, err := ls.loadLayer(parent) + if err != nil { + return err + } + ml.parent = p + + p.referenceCount++ + } + + ls.mounts[ml.name] = ml + + return nil +} + +func (ls *layerStore) applyTar(tx *fileMetadataTransaction, ts io.Reader, parent string, layer *roLayer) error { + digester := digest.Canonical.Digester() + tr := io.TeeReader(ts, 
digester.Hash()) + + rdr := tr + if ls.useTarSplit { + tsw, err := tx.TarSplitWriter(true) + if err != nil { + return err + } + metaPacker := storage.NewJSONPacker(tsw) + defer tsw.Close() + + // we're passing nil here for the file putter, because the ApplyDiff will + // handle the extraction of the archive + rdr, err = asm.NewInputTarStream(tr, metaPacker, nil) + if err != nil { + return err + } + } + + applySize, err := ls.driver.ApplyDiff(layer.cacheID, parent, rdr) + // discard trailing data but ensure metadata is picked up to reconstruct stream + // unconditionally call io.Copy here before checking err to ensure the resources + // allocated by NewInputTarStream above are always released + io.Copy(ioutil.Discard, rdr) // ignore error as reader may be closed + if err != nil { + return err + } + + layer.size = applySize + layer.diffID = DiffID(digester.Digest()) + + logrus.Debugf("Applied tar %s to %s, size: %d", layer.diffID, layer.cacheID, applySize) + + return nil +} + +func (ls *layerStore) Register(ts io.Reader, parent ChainID) (Layer, error) { + return ls.registerWithDescriptor(ts, parent, distribution.Descriptor{}) +} + +func (ls *layerStore) registerWithDescriptor(ts io.Reader, parent ChainID, descriptor distribution.Descriptor) (Layer, error) { + // err is used to hold the error which will always trigger + // cleanup of creates sources but may not be an error returned + // to the caller (already exists). + var err error + var pid string + var p *roLayer + + if string(parent) != "" { + p = ls.get(parent) + if p == nil { + return nil, ErrLayerDoesNotExist + } + pid = p.cacheID + // Release parent chain if error + defer func() { + if err != nil { + ls.layerL.Lock() + ls.releaseLayer(p) + ls.layerL.Unlock() + } + }() + if p.depth() >= maxLayerDepth { + err = ErrMaxDepthExceeded + return nil, err + } + } + + // Create new roLayer + layer := &roLayer{ + parent: p, + cacheID: stringid.GenerateRandomID(), + referenceCount: 1, + layerStore: ls, + references: map[Layer]struct{}{}, + descriptor: descriptor, + } + + if err = ls.driver.Create(layer.cacheID, pid, nil); err != nil { + return nil, err + } + + tx, err := ls.store.StartTransaction() + if err != nil { + return nil, err + } + + defer func() { + if err != nil { + logrus.Debugf("Cleaning up layer %s: %v", layer.cacheID, err) + if err := ls.driver.Remove(layer.cacheID); err != nil { + logrus.Errorf("Error cleaning up cache layer %s: %v", layer.cacheID, err) + } + if err := tx.Cancel(); err != nil { + logrus.Errorf("Error canceling metadata transaction %q: %s", tx.String(), err) + } + } + }() + + if err = ls.applyTar(tx, ts, pid, layer); err != nil { + return nil, err + } + + if layer.parent == nil { + layer.chainID = ChainID(layer.diffID) + } else { + layer.chainID = createChainIDFromParent(layer.parent.chainID, layer.diffID) + } + + if err = storeLayer(tx, layer); err != nil { + return nil, err + } + + ls.layerL.Lock() + defer ls.layerL.Unlock() + + if existingLayer := ls.getWithoutLock(layer.chainID); existingLayer != nil { + // Set error for cleanup, but do not return the error + err = errors.New("layer already exists") + return existingLayer.getReference(), nil + } + + if err = tx.Commit(layer.chainID); err != nil { + return nil, err + } + + ls.layerMap[layer.chainID] = layer + + return layer.getReference(), nil +} + +func (ls *layerStore) getWithoutLock(layer ChainID) *roLayer { + l, ok := ls.layerMap[layer] + if !ok { + return nil + } + + l.referenceCount++ + + return l +} + +func (ls *layerStore) get(l ChainID) *roLayer { + 
ls.layerL.Lock() + defer ls.layerL.Unlock() + return ls.getWithoutLock(l) +} + +func (ls *layerStore) Get(l ChainID) (Layer, error) { + ls.layerL.Lock() + defer ls.layerL.Unlock() + + layer := ls.getWithoutLock(l) + if layer == nil { + return nil, ErrLayerDoesNotExist + } + + return layer.getReference(), nil +} + +func (ls *layerStore) Map() map[ChainID]Layer { + ls.layerL.Lock() + defer ls.layerL.Unlock() + + layers := map[ChainID]Layer{} + + for k, v := range ls.layerMap { + layers[k] = v + } + + return layers +} + +func (ls *layerStore) deleteLayer(layer *roLayer, metadata *Metadata) error { + err := ls.driver.Remove(layer.cacheID) + if err != nil { + return err + } + err = ls.store.Remove(layer.chainID) + if err != nil { + return err + } + metadata.DiffID = layer.diffID + metadata.ChainID = layer.chainID + metadata.Size, err = layer.Size() + if err != nil { + return err + } + metadata.DiffSize = layer.size + + return nil +} + +func (ls *layerStore) releaseLayer(l *roLayer) ([]Metadata, error) { + depth := 0 + removed := []Metadata{} + for { + if l.referenceCount == 0 { + panic("layer not retained") + } + l.referenceCount-- + if l.referenceCount != 0 { + return removed, nil + } + + if len(removed) == 0 && depth > 0 { + panic("cannot remove layer with child") + } + if l.hasReferences() { + panic("cannot delete referenced layer") + } + var metadata Metadata + if err := ls.deleteLayer(l, &metadata); err != nil { + return nil, err + } + + delete(ls.layerMap, l.chainID) + removed = append(removed, metadata) + + if l.parent == nil { + return removed, nil + } + + depth++ + l = l.parent + } +} + +func (ls *layerStore) Release(l Layer) ([]Metadata, error) { + ls.layerL.Lock() + defer ls.layerL.Unlock() + layer, ok := ls.layerMap[l.ChainID()] + if !ok { + return []Metadata{}, nil + } + if !layer.hasReference(l) { + return nil, ErrLayerNotRetained + } + + layer.deleteReference(l) + + return ls.releaseLayer(layer) +} + +func (ls *layerStore) CreateRWLayer(name string, parent ChainID, opts *CreateRWLayerOpts) (_ RWLayer, err error) { + var ( + storageOpt map[string]string + initFunc MountInit + mountLabel string + ) + + if opts != nil { + mountLabel = opts.MountLabel + storageOpt = opts.StorageOpt + initFunc = opts.InitFunc + } + + ls.locker.Lock(name) + defer ls.locker.Unlock(name) + + ls.mountL.Lock() + _, ok := ls.mounts[name] + ls.mountL.Unlock() + if ok { + return nil, ErrMountNameConflict + } + + var pid string + var p *roLayer + if string(parent) != "" { + p = ls.get(parent) + if p == nil { + return nil, ErrLayerDoesNotExist + } + pid = p.cacheID + + // Release parent chain if error + defer func() { + if err != nil { + ls.layerL.Lock() + ls.releaseLayer(p) + ls.layerL.Unlock() + } + }() + } + + m := &mountedLayer{ + name: name, + parent: p, + mountID: ls.mountID(name), + layerStore: ls, + references: map[RWLayer]*referencedRWLayer{}, + } + + if initFunc != nil { + pid, err = ls.initMount(m.mountID, pid, mountLabel, initFunc, storageOpt) + if err != nil { + return + } + m.initID = pid + } + + createOpts := &graphdriver.CreateOpts{ + StorageOpt: storageOpt, + } + + if err = ls.driver.CreateReadWrite(m.mountID, pid, createOpts); err != nil { + return + } + if err = ls.saveMount(m); err != nil { + return + } + + return m.getReference(), nil +} + +func (ls *layerStore) GetRWLayer(id string) (RWLayer, error) { + ls.locker.Lock(id) + defer ls.locker.Unlock(id) + + ls.mountL.Lock() + mount := ls.mounts[id] + ls.mountL.Unlock() + if mount == nil { + return nil, ErrMountDoesNotExist + } + + return 
mount.getReference(), nil +} + +func (ls *layerStore) GetMountID(id string) (string, error) { + ls.mountL.Lock() + mount := ls.mounts[id] + ls.mountL.Unlock() + + if mount == nil { + return "", ErrMountDoesNotExist + } + logrus.Debugf("GetMountID id: %s -> mountID: %s", id, mount.mountID) + + return mount.mountID, nil +} + +func (ls *layerStore) ReleaseRWLayer(l RWLayer) ([]Metadata, error) { + name := l.Name() + ls.locker.Lock(name) + defer ls.locker.Unlock(name) + + ls.mountL.Lock() + m := ls.mounts[name] + ls.mountL.Unlock() + if m == nil { + return []Metadata{}, nil + } + + if err := m.deleteReference(l); err != nil { + return nil, err + } + + if m.hasReferences() { + return []Metadata{}, nil + } + + if err := ls.driver.Remove(m.mountID); err != nil { + logrus.Errorf("Error removing mounted layer %s: %s", m.name, err) + m.retakeReference(l) + return nil, err + } + + if m.initID != "" { + if err := ls.driver.Remove(m.initID); err != nil { + logrus.Errorf("Error removing init layer %s: %s", m.name, err) + m.retakeReference(l) + return nil, err + } + } + + if err := ls.store.RemoveMount(m.name); err != nil { + logrus.Errorf("Error removing mount metadata: %s: %s", m.name, err) + m.retakeReference(l) + return nil, err + } + + ls.mountL.Lock() + delete(ls.mounts, name) + ls.mountL.Unlock() + + ls.layerL.Lock() + defer ls.layerL.Unlock() + if m.parent != nil { + return ls.releaseLayer(m.parent) + } + + return []Metadata{}, nil +} + +func (ls *layerStore) saveMount(mount *mountedLayer) error { + if err := ls.store.SetMountID(mount.name, mount.mountID); err != nil { + return err + } + + if mount.initID != "" { + if err := ls.store.SetInitID(mount.name, mount.initID); err != nil { + return err + } + } + + if mount.parent != nil { + if err := ls.store.SetMountParent(mount.name, mount.parent.chainID); err != nil { + return err + } + } + + ls.mountL.Lock() + ls.mounts[mount.name] = mount + ls.mountL.Unlock() + + return nil +} + +func (ls *layerStore) initMount(graphID, parent, mountLabel string, initFunc MountInit, storageOpt map[string]string) (string, error) { + // Use "-init" to maintain compatibility with graph drivers + // which are expecting this layer with this special name. If all + // graph drivers can be updated to not rely on knowing about this layer + // then the initID should be randomly generated. 
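+	// The returned initID is recorded on the mount and becomes the parent
+	// of the writable layer; it is removed when the mount is released.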
+ initID := fmt.Sprintf("%s-init", graphID) + + createOpts := &graphdriver.CreateOpts{ + MountLabel: mountLabel, + StorageOpt: storageOpt, + } + + if err := ls.driver.CreateReadWrite(initID, parent, createOpts); err != nil { + return "", err + } + p, err := ls.driver.Get(initID, "") + if err != nil { + return "", err + } + + if err := initFunc(p); err != nil { + ls.driver.Put(initID) + return "", err + } + + if err := ls.driver.Put(initID); err != nil { + return "", err + } + + return initID, nil +} + +func (ls *layerStore) getTarStream(rl *roLayer) (io.ReadCloser, error) { + if !ls.useTarSplit { + var parentCacheID string + if rl.parent != nil { + parentCacheID = rl.parent.cacheID + } + + return ls.driver.Diff(rl.cacheID, parentCacheID) + } + + r, err := ls.store.TarSplitReader(rl.chainID) + if err != nil { + return nil, err + } + + pr, pw := io.Pipe() + go func() { + err := ls.assembleTarTo(rl.cacheID, r, nil, pw) + if err != nil { + pw.CloseWithError(err) + } else { + pw.Close() + } + }() + + return pr, nil +} + +func (ls *layerStore) assembleTarTo(graphID string, metadata io.ReadCloser, size *int64, w io.Writer) error { + diffDriver, ok := ls.driver.(graphdriver.DiffGetterDriver) + if !ok { + diffDriver = &naiveDiffPathDriver{ls.driver} + } + + defer metadata.Close() + + // get our relative path to the container + fileGetCloser, err := diffDriver.DiffGetter(graphID) + if err != nil { + return err + } + defer fileGetCloser.Close() + + metaUnpacker := storage.NewJSONUnpacker(metadata) + upackerCounter := &unpackSizeCounter{metaUnpacker, size} + logrus.Debugf("Assembling tar data for %s", graphID) + return asm.WriteOutputTarStream(fileGetCloser, upackerCounter, w) +} + +func (ls *layerStore) Cleanup() error { + return ls.driver.Cleanup() +} + +func (ls *layerStore) DriverStatus() [][2]string { + return ls.driver.Status() +} + +func (ls *layerStore) DriverName() string { + return ls.driver.String() +} + +type naiveDiffPathDriver struct { + graphdriver.Driver +} + +type fileGetPutter struct { + storage.FileGetter + driver graphdriver.Driver + id string +} + +func (w *fileGetPutter) Close() error { + return w.driver.Put(w.id) +} + +func (n *naiveDiffPathDriver) DiffGetter(id string) (graphdriver.FileGetCloser, error) { + p, err := n.Driver.Get(id, "") + if err != nil { + return nil, err + } + return &fileGetPutter{storage.NewPathFileGetter(p.Path()), n.Driver, id}, nil +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/layer/layer_store_windows.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/layer/layer_store_windows.go new file mode 100644 index 000000000..eca1f6a83 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/layer/layer_store_windows.go @@ -0,0 +1,11 @@ +package layer // import "github.com/docker/docker/layer" + +import ( + "io" + + "github.com/docker/distribution" +) + +func (ls *layerStore) RegisterWithDescriptor(ts io.Reader, parent ChainID, descriptor distribution.Descriptor) (Layer, error) { + return ls.registerWithDescriptor(ts, parent, descriptor) +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/layer/layer_unix.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/layer/layer_unix.go new file mode 100644 index 000000000..002c7ff83 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/layer/layer_unix.go @@ -0,0 +1,9 @@ +// +build linux freebsd darwin openbsd + +package layer // import "github.com/docker/docker/layer" + +import "github.com/docker/docker/pkg/stringid" + +func (ls *layerStore) mountID(name 
string) string { + return stringid.GenerateRandomID() +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/layer/layer_windows.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/layer/layer_windows.go new file mode 100644 index 000000000..3d079a9af --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/layer/layer_windows.go @@ -0,0 +1,46 @@ +package layer // import "github.com/docker/docker/layer" + +import ( + "errors" +) + +// Getter is an interface to get the path to a layer on the host. +type Getter interface { + // GetLayerPath gets the path for the layer. This is different from Get() + // since that returns an interface to account for unmountable layers. + GetLayerPath(id string) (string, error) +} + +// GetLayerPath returns the path to a layer +func GetLayerPath(s Store, layer ChainID) (string, error) { + ls, ok := s.(*layerStore) + if !ok { + return "", errors.New("unsupported layer store") + } + ls.layerL.Lock() + defer ls.layerL.Unlock() + + rl, ok := ls.layerMap[layer] + if !ok { + return "", ErrLayerDoesNotExist + } + + if layerGetter, ok := ls.driver.(Getter); ok { + return layerGetter.GetLayerPath(rl.cacheID) + } + path, err := ls.driver.Get(rl.cacheID, "") + if err != nil { + return "", err + } + + if err := ls.driver.Put(rl.cacheID); err != nil { + return "", err + } + + return path.Path(), nil +} + +func (ls *layerStore) mountID(name string) string { + // windows has issues if container ID doesn't match mount ID + return name +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/layer/migration.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/layer/migration.go new file mode 100644 index 000000000..12500694f --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/layer/migration.go @@ -0,0 +1,193 @@ +package layer // import "github.com/docker/docker/layer" + +import ( + "compress/gzip" + "errors" + "io" + "os" + + "github.com/opencontainers/go-digest" + "github.com/sirupsen/logrus" + "github.com/vbatts/tar-split/tar/asm" + "github.com/vbatts/tar-split/tar/storage" +) + +func (ls *layerStore) ChecksumForGraphID(id, parent, oldTarDataPath, newTarDataPath string) (diffID DiffID, size int64, err error) { + defer func() { + if err != nil { + logrus.Debugf("could not get checksum for %q with tar-split: %q", id, err) + diffID, size, err = ls.checksumForGraphIDNoTarsplit(id, parent, newTarDataPath) + } + }() + + if oldTarDataPath == "" { + err = errors.New("no tar-split file") + return + } + + tarDataFile, err := os.Open(oldTarDataPath) + if err != nil { + return + } + defer tarDataFile.Close() + uncompressed, err := gzip.NewReader(tarDataFile) + if err != nil { + return + } + + dgst := digest.Canonical.Digester() + err = ls.assembleTarTo(id, uncompressed, &size, dgst.Hash()) + if err != nil { + return + } + + diffID = DiffID(dgst.Digest()) + err = os.RemoveAll(newTarDataPath) + if err != nil { + return + } + err = os.Link(oldTarDataPath, newTarDataPath) + + return +} + +func (ls *layerStore) checksumForGraphIDNoTarsplit(id, parent, newTarDataPath string) (diffID DiffID, size int64, err error) { + rawarchive, err := ls.driver.Diff(id, parent) + if err != nil { + return + } + defer rawarchive.Close() + + f, err := os.Create(newTarDataPath) + if err != nil { + return + } + defer f.Close() + mfz := gzip.NewWriter(f) + defer mfz.Close() + metaPacker := storage.NewJSONPacker(mfz) + + packerCounter := &packSizeCounter{metaPacker, &size} + + archive, err := asm.NewInputTarStream(rawarchive, packerCounter, nil) + if err != nil { + 
return + } + dgst, err := digest.FromReader(archive) + if err != nil { + return + } + diffID = DiffID(dgst) + return +} + +func (ls *layerStore) RegisterByGraphID(graphID string, parent ChainID, diffID DiffID, tarDataFile string, size int64) (Layer, error) { + // err is used to hold the error which will always trigger + // cleanup of creates sources but may not be an error returned + // to the caller (already exists). + var err error + var p *roLayer + if string(parent) != "" { + p = ls.get(parent) + if p == nil { + return nil, ErrLayerDoesNotExist + } + + // Release parent chain if error + defer func() { + if err != nil { + ls.layerL.Lock() + ls.releaseLayer(p) + ls.layerL.Unlock() + } + }() + } + + // Create new roLayer + layer := &roLayer{ + parent: p, + cacheID: graphID, + referenceCount: 1, + layerStore: ls, + references: map[Layer]struct{}{}, + diffID: diffID, + size: size, + chainID: createChainIDFromParent(parent, diffID), + } + + ls.layerL.Lock() + defer ls.layerL.Unlock() + + if existingLayer := ls.getWithoutLock(layer.chainID); existingLayer != nil { + // Set error for cleanup, but do not return + err = errors.New("layer already exists") + return existingLayer.getReference(), nil + } + + tx, err := ls.store.StartTransaction() + if err != nil { + return nil, err + } + + defer func() { + if err != nil { + logrus.Debugf("Cleaning up transaction after failed migration for %s: %v", graphID, err) + if err := tx.Cancel(); err != nil { + logrus.Errorf("Error canceling metadata transaction %q: %s", tx.String(), err) + } + } + }() + + tsw, err := tx.TarSplitWriter(false) + if err != nil { + return nil, err + } + defer tsw.Close() + tdf, err := os.Open(tarDataFile) + if err != nil { + return nil, err + } + defer tdf.Close() + _, err = io.Copy(tsw, tdf) + if err != nil { + return nil, err + } + + if err = storeLayer(tx, layer); err != nil { + return nil, err + } + + if err = tx.Commit(layer.chainID); err != nil { + return nil, err + } + + ls.layerMap[layer.chainID] = layer + + return layer.getReference(), nil +} + +type unpackSizeCounter struct { + unpacker storage.Unpacker + size *int64 +} + +func (u *unpackSizeCounter) Next() (*storage.Entry, error) { + e, err := u.unpacker.Next() + if err == nil && u.size != nil { + *u.size += e.Size + } + return e, err +} + +type packSizeCounter struct { + packer storage.Packer + size *int64 +} + +func (p *packSizeCounter) AddEntry(e storage.Entry) (int, error) { + n, err := p.packer.AddEntry(e) + if err == nil && p.size != nil { + *p.size += e.Size + } + return n, err +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/layer/mounted_layer.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/layer/mounted_layer.go new file mode 100644 index 000000000..c5d9e0e48 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/layer/mounted_layer.go @@ -0,0 +1,112 @@ +package layer // import "github.com/docker/docker/layer" + +import ( + "io" + "sync" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/containerfs" +) + +type mountedLayer struct { + name string + mountID string + initID string + parent *roLayer + path string + layerStore *layerStore + + sync.Mutex + references map[RWLayer]*referencedRWLayer +} + +func (ml *mountedLayer) cacheParent() string { + if ml.initID != "" { + return ml.initID + } + if ml.parent != nil { + return ml.parent.cacheID + } + return "" +} + +func (ml *mountedLayer) TarStream() (io.ReadCloser, error) { + return ml.layerStore.driver.Diff(ml.mountID, ml.cacheParent()) +} + +func (ml 
*mountedLayer) Name() string { + return ml.name +} + +func (ml *mountedLayer) Parent() Layer { + if ml.parent != nil { + return ml.parent + } + + // Return a nil interface instead of an interface wrapping a nil + // pointer. + return nil +} + +func (ml *mountedLayer) Size() (int64, error) { + return ml.layerStore.driver.DiffSize(ml.mountID, ml.cacheParent()) +} + +func (ml *mountedLayer) Changes() ([]archive.Change, error) { + return ml.layerStore.driver.Changes(ml.mountID, ml.cacheParent()) +} + +func (ml *mountedLayer) Metadata() (map[string]string, error) { + return ml.layerStore.driver.GetMetadata(ml.mountID) +} + +func (ml *mountedLayer) getReference() RWLayer { + ref := &referencedRWLayer{ + mountedLayer: ml, + } + ml.Lock() + ml.references[ref] = ref + ml.Unlock() + + return ref +} + +func (ml *mountedLayer) hasReferences() bool { + ml.Lock() + ret := len(ml.references) > 0 + ml.Unlock() + + return ret +} + +func (ml *mountedLayer) deleteReference(ref RWLayer) error { + ml.Lock() + defer ml.Unlock() + if _, ok := ml.references[ref]; !ok { + return ErrLayerNotRetained + } + delete(ml.references, ref) + return nil +} + +func (ml *mountedLayer) retakeReference(r RWLayer) { + if ref, ok := r.(*referencedRWLayer); ok { + ml.Lock() + ml.references[ref] = ref + ml.Unlock() + } +} + +type referencedRWLayer struct { + *mountedLayer +} + +func (rl *referencedRWLayer) Mount(mountLabel string) (containerfs.ContainerFS, error) { + return rl.layerStore.driver.Get(rl.mountedLayer.mountID, mountLabel) +} + +// Unmount decrements the activity count and unmounts the underlying layer +// Callers should only call `Unmount` once per call to `Mount`, even on error. +func (rl *referencedRWLayer) Unmount() error { + return rl.layerStore.driver.Put(rl.mountedLayer.mountID) +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/layer/ro_layer.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/layer/ro_layer.go new file mode 100644 index 000000000..3555e8b02 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/layer/ro_layer.go @@ -0,0 +1,182 @@ +package layer // import "github.com/docker/docker/layer" + +import ( + "fmt" + "io" + + "github.com/docker/distribution" + "github.com/opencontainers/go-digest" +) + +type roLayer struct { + chainID ChainID + diffID DiffID + parent *roLayer + cacheID string + size int64 + layerStore *layerStore + descriptor distribution.Descriptor + + referenceCount int + references map[Layer]struct{} +} + +// TarStream for roLayer guarantees that the data that is produced is the exact +// data that the layer was registered with. +func (rl *roLayer) TarStream() (io.ReadCloser, error) { + rc, err := rl.layerStore.getTarStream(rl) + if err != nil { + return nil, err + } + + vrc, err := newVerifiedReadCloser(rc, digest.Digest(rl.diffID)) + if err != nil { + return nil, err + } + return vrc, nil +} + +// TarStreamFrom does not make any guarantees to the correctness of the produced +// data. As such it should not be used when the layer content must be verified +// to be an exact match to the registered layer. 
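+// Unlike TarStream, the stream comes straight from the graph driver's Diff
+// and is not checked against the layer's recorded digest.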
+func (rl *roLayer) TarStreamFrom(parent ChainID) (io.ReadCloser, error) { + var parentCacheID string + for pl := rl.parent; pl != nil; pl = pl.parent { + if pl.chainID == parent { + parentCacheID = pl.cacheID + break + } + } + + if parent != ChainID("") && parentCacheID == "" { + return nil, fmt.Errorf("layer ID '%s' is not a parent of the specified layer: cannot provide diff to non-parent", parent) + } + return rl.layerStore.driver.Diff(rl.cacheID, parentCacheID) +} + +func (rl *roLayer) CacheID() string { + return rl.cacheID +} + +func (rl *roLayer) ChainID() ChainID { + return rl.chainID +} + +func (rl *roLayer) DiffID() DiffID { + return rl.diffID +} + +func (rl *roLayer) Parent() Layer { + if rl.parent == nil { + return nil + } + return rl.parent +} + +func (rl *roLayer) Size() (size int64, err error) { + if rl.parent != nil { + size, err = rl.parent.Size() + if err != nil { + return + } + } + + return size + rl.size, nil +} + +func (rl *roLayer) DiffSize() (size int64, err error) { + return rl.size, nil +} + +func (rl *roLayer) Metadata() (map[string]string, error) { + return rl.layerStore.driver.GetMetadata(rl.cacheID) +} + +type referencedCacheLayer struct { + *roLayer +} + +func (rl *roLayer) getReference() Layer { + ref := &referencedCacheLayer{ + roLayer: rl, + } + rl.references[ref] = struct{}{} + + return ref +} + +func (rl *roLayer) hasReference(ref Layer) bool { + _, ok := rl.references[ref] + return ok +} + +func (rl *roLayer) hasReferences() bool { + return len(rl.references) > 0 +} + +func (rl *roLayer) deleteReference(ref Layer) { + delete(rl.references, ref) +} + +func (rl *roLayer) depth() int { + if rl.parent == nil { + return 1 + } + return rl.parent.depth() + 1 +} + +func storeLayer(tx *fileMetadataTransaction, layer *roLayer) error { + if err := tx.SetDiffID(layer.diffID); err != nil { + return err + } + if err := tx.SetSize(layer.size); err != nil { + return err + } + if err := tx.SetCacheID(layer.cacheID); err != nil { + return err + } + // Do not store empty descriptors + if layer.descriptor.Digest != "" { + if err := tx.SetDescriptor(layer.descriptor); err != nil { + return err + } + } + if layer.parent != nil { + if err := tx.SetParent(layer.parent.chainID); err != nil { + return err + } + } + return tx.setOS(layer.layerStore.os) +} + +func newVerifiedReadCloser(rc io.ReadCloser, dgst digest.Digest) (io.ReadCloser, error) { + return &verifiedReadCloser{ + rc: rc, + dgst: dgst, + verifier: dgst.Verifier(), + }, nil +} + +type verifiedReadCloser struct { + rc io.ReadCloser + dgst digest.Digest + verifier digest.Verifier +} + +func (vrc *verifiedReadCloser) Read(p []byte) (n int, err error) { + n, err = vrc.rc.Read(p) + if n > 0 { + if n, err := vrc.verifier.Write(p[:n]); err != nil { + return n, err + } + } + if err == io.EOF { + if !vrc.verifier.Verified() { + err = fmt.Errorf("could not verify layer data for: %s. This may be because internal files in the layer store were modified. 
Re-pulling or rebuilding this image may resolve the issue", vrc.dgst)
+		}
+	}
+	return
+}
+func (vrc *verifiedReadCloser) Close() error {
+	return vrc.rc.Close()
+}
diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/layer/ro_layer_windows.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/layer/ro_layer_windows.go
new file mode 100644
index 000000000..a4f0c8088
--- /dev/null
+++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/layer/ro_layer_windows.go
@@ -0,0 +1,9 @@
+package layer // import "github.com/docker/docker/layer"
+
+import "github.com/docker/distribution"
+
+var _ distribution.Describable = &roLayer{}
+
+func (rl *roLayer) Descriptor() distribution.Descriptor {
+	return rl.descriptor
+}
diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/oci/defaults.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/oci/defaults.go
new file mode 100644
index 000000000..35fbcd1d8
--- /dev/null
+++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/oci/defaults.go
@@ -0,0 +1,213 @@
+package oci // import "github.com/docker/docker/oci"
+
+import (
+	"os"
+	"runtime"
+
+	"github.com/opencontainers/runtime-spec/specs-go"
+)
+
+func iPtr(i int64) *int64        { return &i }
+func u32Ptr(i int64) *uint32     { u := uint32(i); return &u }
+func fmPtr(i int64) *os.FileMode { fm := os.FileMode(i); return &fm }
+
+// DefaultCapabilities returns the default set of Linux kernel capabilities
+func DefaultCapabilities() []string {
+	return []string{
+		"CAP_CHOWN",
+		"CAP_DAC_OVERRIDE",
+		"CAP_FSETID",
+		"CAP_FOWNER",
+		"CAP_MKNOD",
+		"CAP_NET_RAW",
+		"CAP_SETGID",
+		"CAP_SETUID",
+		"CAP_SETFCAP",
+		"CAP_SETPCAP",
+		"CAP_NET_BIND_SERVICE",
+		"CAP_SYS_CHROOT",
+		"CAP_KILL",
+		"CAP_AUDIT_WRITE",
+	}
+}
+
+// DefaultSpec returns the default spec used by docker for the current Platform
+func DefaultSpec() specs.Spec {
+	return DefaultOSSpec(runtime.GOOS)
+}
+
+// DefaultOSSpec returns the spec for a given OS
+func DefaultOSSpec(osName string) specs.Spec {
+	if osName == "windows" {
+		return DefaultWindowsSpec()
+	}
+	return DefaultLinuxSpec()
+}
+
+// DefaultWindowsSpec creates a default spec for running Windows containers
+func DefaultWindowsSpec() specs.Spec {
+	return specs.Spec{
+		Version: specs.Version,
+		Windows: &specs.Windows{},
+		Process: &specs.Process{},
+		Root:    &specs.Root{},
+	}
+}
+
+// DefaultLinuxSpec creates a default spec for running Linux containers
+func DefaultLinuxSpec() specs.Spec {
+	s := specs.Spec{
+		Version: specs.Version,
+		Process: &specs.Process{
+			Capabilities: &specs.LinuxCapabilities{
+				Bounding:    DefaultCapabilities(),
+				Permitted:   DefaultCapabilities(),
+				Inheritable: DefaultCapabilities(),
+				Effective:   DefaultCapabilities(),
+			},
+		},
+		Root: &specs.Root{},
+	}
+	s.Mounts = []specs.Mount{
+		{
+			Destination: "/proc",
+			Type:        "proc",
+			Source:      "proc",
+			Options:     []string{"nosuid", "noexec", "nodev"},
+		},
+		{
+			Destination: "/dev",
+			Type:        "tmpfs",
+			Source:      "tmpfs",
+			Options:     []string{"nosuid", "strictatime", "mode=755", "size=65536k"},
+		},
+		{
+			Destination: "/dev/pts",
+			Type:        "devpts",
+			Source:      "devpts",
+			Options:     []string{"nosuid", "noexec", "newinstance", "ptmxmode=0666", "mode=0620", "gid=5"},
+		},
+		{
+			Destination: "/sys",
+			Type:        "sysfs",
+			Source:      "sysfs",
+			Options:     []string{"nosuid", "noexec", "nodev", "ro"},
+		},
+		{
+			Destination: "/sys/fs/cgroup",
+			Type:        "cgroup",
+			Source:      "cgroup",
+			Options:     []string{"ro", "nosuid", "noexec", "nodev"},
+		},
+		{
+			Destination: "/dev/mqueue",
+			Type:        "mqueue",
+			Source:      "mqueue",
+			Options:     []string{"nosuid", "noexec",
"nodev"}, + }, + { + Destination: "/dev/shm", + Type: "tmpfs", + Source: "shm", + Options: []string{"nosuid", "noexec", "nodev", "mode=1777"}, + }, + } + + s.Linux = &specs.Linux{ + MaskedPaths: []string{ + "/proc/asound", + "/proc/acpi", + "/proc/kcore", + "/proc/keys", + "/proc/latency_stats", + "/proc/timer_list", + "/proc/timer_stats", + "/proc/sched_debug", + "/proc/scsi", + "/sys/firmware", + }, + ReadonlyPaths: []string{ + "/proc/bus", + "/proc/fs", + "/proc/irq", + "/proc/sys", + "/proc/sysrq-trigger", + }, + Namespaces: []specs.LinuxNamespace{ + {Type: "mount"}, + {Type: "network"}, + {Type: "uts"}, + {Type: "pid"}, + {Type: "ipc"}, + }, + // Devices implicitly contains the following devices: + // null, zero, full, random, urandom, tty, console, and ptmx. + // ptmx is a bind mount or symlink of the container's ptmx. + // See also: https://github.com/opencontainers/runtime-spec/blob/master/config-linux.md#default-devices + Devices: []specs.LinuxDevice{}, + Resources: &specs.LinuxResources{ + Devices: []specs.LinuxDeviceCgroup{ + { + Allow: false, + Access: "rwm", + }, + { + Allow: true, + Type: "c", + Major: iPtr(1), + Minor: iPtr(5), + Access: "rwm", + }, + { + Allow: true, + Type: "c", + Major: iPtr(1), + Minor: iPtr(3), + Access: "rwm", + }, + { + Allow: true, + Type: "c", + Major: iPtr(1), + Minor: iPtr(9), + Access: "rwm", + }, + { + Allow: true, + Type: "c", + Major: iPtr(1), + Minor: iPtr(8), + Access: "rwm", + }, + { + Allow: true, + Type: "c", + Major: iPtr(5), + Minor: iPtr(0), + Access: "rwm", + }, + { + Allow: true, + Type: "c", + Major: iPtr(5), + Minor: iPtr(1), + Access: "rwm", + }, + { + Allow: false, + Type: "c", + Major: iPtr(10), + Minor: iPtr(229), + Access: "rwm", + }, + }, + }, + } + + // For LCOW support, populate a blank Windows spec + if runtime.GOOS == "windows" { + s.Windows = &specs.Windows{} + } + + return s +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/oci/devices_linux.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/oci/devices_linux.go new file mode 100644 index 000000000..46d4e1d32 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/oci/devices_linux.go @@ -0,0 +1,86 @@ +package oci // import "github.com/docker/docker/oci" + +import ( + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/opencontainers/runc/libcontainer/configs" + "github.com/opencontainers/runc/libcontainer/devices" + "github.com/opencontainers/runtime-spec/specs-go" +) + +// Device transforms a libcontainer configs.Device to a specs.LinuxDevice object. +func Device(d *configs.Device) specs.LinuxDevice { + return specs.LinuxDevice{ + Type: string(d.Type), + Path: d.Path, + Major: d.Major, + Minor: d.Minor, + FileMode: fmPtr(int64(d.FileMode)), + UID: u32Ptr(int64(d.Uid)), + GID: u32Ptr(int64(d.Gid)), + } +} + +func deviceCgroup(d *configs.Device) specs.LinuxDeviceCgroup { + t := string(d.Type) + return specs.LinuxDeviceCgroup{ + Allow: true, + Type: t, + Major: &d.Major, + Minor: &d.Minor, + Access: d.Permissions, + } +} + +// DevicesFromPath computes a list of devices and device permissions from paths (pathOnHost and pathInContainer) and cgroup permissions. 
+func DevicesFromPath(pathOnHost, pathInContainer, cgroupPermissions string) (devs []specs.LinuxDevice, devPermissions []specs.LinuxDeviceCgroup, err error) { + resolvedPathOnHost := pathOnHost + + // check if it is a symbolic link + if src, e := os.Lstat(pathOnHost); e == nil && src.Mode()&os.ModeSymlink == os.ModeSymlink { + if linkedPathOnHost, e := filepath.EvalSymlinks(pathOnHost); e == nil { + resolvedPathOnHost = linkedPathOnHost + } + } + + device, err := devices.DeviceFromPath(resolvedPathOnHost, cgroupPermissions) + // if there was no error, return the device + if err == nil { + device.Path = pathInContainer + return append(devs, Device(device)), append(devPermissions, deviceCgroup(device)), nil + } + + // if the device is not a device node + // try to see if it's a directory holding many devices + if err == devices.ErrNotADevice { + + // check if it is a directory + if src, e := os.Stat(resolvedPathOnHost); e == nil && src.IsDir() { + + // mount the internal devices recursively + filepath.Walk(resolvedPathOnHost, func(dpath string, f os.FileInfo, e error) error { + childDevice, e := devices.DeviceFromPath(dpath, cgroupPermissions) + if e != nil { + // ignore the device + return nil + } + + // add the device to userSpecified devices + childDevice.Path = strings.Replace(dpath, resolvedPathOnHost, pathInContainer, 1) + devs = append(devs, Device(childDevice)) + devPermissions = append(devPermissions, deviceCgroup(childDevice)) + + return nil + }) + } + } + + if len(devs) > 0 { + return devs, devPermissions, nil + } + + return devs, devPermissions, fmt.Errorf("error gathering device information while adding custom device %q: %s", pathOnHost, err) +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/oci/devices_unsupported.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/oci/devices_unsupported.go new file mode 100644 index 000000000..af6dd3bda --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/oci/devices_unsupported.go @@ -0,0 +1,20 @@ +// +build !linux + +package oci // import "github.com/docker/docker/oci" + +import ( + "errors" + + "github.com/opencontainers/runc/libcontainer/configs" + specs "github.com/opencontainers/runtime-spec/specs-go" +) + +// Device transforms a libcontainer configs.Device to a specs.Device object. +// Not implemented +func Device(d *configs.Device) specs.LinuxDevice { return specs.LinuxDevice{} } + +// DevicesFromPath computes a list of devices and device permissions from paths (pathOnHost and pathInContainer) and cgroup permissions. +// Not implemented +func DevicesFromPath(pathOnHost, pathInContainer, cgroupPermissions string) (devs []specs.LinuxDevice, devPermissions []specs.LinuxDeviceCgroup, err error) { + return nil, nil, errors.New("oci/devices: unsupported platform") +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/oci/namespaces.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/oci/namespaces.go new file mode 100644 index 000000000..5a2d8f208 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/oci/namespaces.go @@ -0,0 +1,13 @@ +package oci // import "github.com/docker/docker/oci" + +import "github.com/opencontainers/runtime-spec/specs-go" + +// RemoveNamespace removes the `nsType` namespace from OCI spec `s` +func RemoveNamespace(s *specs.Spec, nsType specs.LinuxNamespaceType) { + for i, n := range s.Linux.Namespaces { + if n.Type == nsType { + s.Linux.Namespaces = append(s.Linux.Namespaces[:i], s.Linux.Namespaces[i+1:]...) 
+			return
+		}
+	}
+}
diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/oci/oci.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/oci/oci.go
new file mode 100644
index 000000000..6c84ba348
--- /dev/null
+++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/oci/oci.go
@@ -0,0 +1,67 @@
+package oci // import "github.com/docker/docker/oci"
+
+import (
+	"fmt"
+	"regexp"
+	"strconv"
+
+	specs "github.com/opencontainers/runtime-spec/specs-go"
+)
+
+// nolint: gosimple
+var deviceCgroupRuleRegex = regexp.MustCompile("^([acb]) ([0-9]+|\\*):([0-9]+|\\*) ([rwm]{1,3})$")
+
+// SetCapabilities sets the provided capabilities on the spec.
+// For non-root users, the effective and permitted sets are
+// dropped, in the way execve does.
+func SetCapabilities(s *specs.Spec, caplist []string) error {
+	s.Process.Capabilities.Effective = caplist
+	s.Process.Capabilities.Bounding = caplist
+	s.Process.Capabilities.Permitted = caplist
+	s.Process.Capabilities.Inheritable = caplist
+	// setUser has already been executed here
+	// if non root drop capabilities in the way execve does
+	if s.Process.User.UID != 0 {
+		s.Process.Capabilities.Effective = []string{}
+		s.Process.Capabilities.Permitted = []string{}
+	}
+	return nil
+}
+
+// AppendDevicePermissionsFromCgroupRules takes rules for the devices cgroup to append to the default set
+func AppendDevicePermissionsFromCgroupRules(devPermissions []specs.LinuxDeviceCgroup, rules []string) ([]specs.LinuxDeviceCgroup, error) {
+	for _, deviceCgroupRule := range rules {
+		ss := deviceCgroupRuleRegex.FindAllStringSubmatch(deviceCgroupRule, -1)
+		// len(ss) == 0 guards against rules that do not match the pattern at all
+		if len(ss) == 0 || len(ss[0]) != 5 {
+			return nil, fmt.Errorf("invalid device cgroup rule format: '%s'", deviceCgroupRule)
+		}
+		matches := ss[0]
+
+		dPermissions := specs.LinuxDeviceCgroup{
+			Allow:  true,
+			Type:   matches[1],
+			Access: matches[4],
+		}
+		if matches[2] == "*" {
+			major := int64(-1)
+			dPermissions.Major = &major
+		} else {
+			major, err := strconv.ParseInt(matches[2], 10, 64)
+			if err != nil {
+				return nil, fmt.Errorf("invalid major value in device cgroup rule format: '%s'", deviceCgroupRule)
+			}
+			dPermissions.Major = &major
+		}
+		if matches[3] == "*" {
+			minor := int64(-1)
+			dPermissions.Minor = &minor
+		} else {
+			minor, err := strconv.ParseInt(matches[3], 10, 64)
+			if err != nil {
+				return nil, fmt.Errorf("invalid minor value in device cgroup rule format: '%s'", deviceCgroupRule)
+			}
+			dPermissions.Minor = &minor
+		}
+		devPermissions = append(devPermissions, dPermissions)
+	}
+	return devPermissions, nil
+}
diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/archive/README.md b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/archive/README.md
new file mode 100644
index 000000000..7307d9694
--- /dev/null
+++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/archive/README.md
@@ -0,0 +1 @@
+This code provides helper functions for dealing with archive files.
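+
+For example, `DecompressStream` sniffs the compression format (gzip, bzip2,
+xz, or uncompressed) and returns a reader for the decompressed data. A
+minimal sketch of its use (error handling elided, imports assumed):
+
+```go
+f, _ := os.Open("layer.tar.gz")
+rdr, _ := archive.DecompressStream(f)
+defer rdr.Close()
+io.Copy(ioutil.Discard, rdr) // consume the decompressed tar stream
+```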
diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/archive/archive.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/archive/archive.go
new file mode 100644
index 000000000..bb623fa85
--- /dev/null
+++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/archive/archive.go
@@ -0,0 +1,1284 @@
+package archive // import "github.com/docker/docker/pkg/archive"
+
+import (
+	"archive/tar"
+	"bufio"
+	"bytes"
+	"compress/bzip2"
+	"compress/gzip"
+	"context"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"runtime"
+	"strconv"
+	"strings"
+	"syscall"
+	"time"
+
+	"github.com/docker/docker/pkg/fileutils"
+	"github.com/docker/docker/pkg/idtools"
+	"github.com/docker/docker/pkg/ioutils"
+	"github.com/docker/docker/pkg/pools"
+	"github.com/docker/docker/pkg/system"
+	"github.com/sirupsen/logrus"
+)
+
+var unpigzPath string
+
+func init() {
+	if path, err := exec.LookPath("unpigz"); err != nil {
+		logrus.Debug("unpigz binary not found in PATH, falling back to go gzip library")
+	} else {
+		logrus.Debugf("Using unpigz binary found at path %s", path)
+		unpigzPath = path
+	}
+}
+
+type (
+	// Compression is the state that represents whether a stream is compressed or not.
+	Compression int
+	// WhiteoutFormat is the format of whiteouts unpacked
+	WhiteoutFormat int
+
+	// TarOptions wraps the tar options.
+	TarOptions struct {
+		IncludeFiles     []string
+		ExcludePatterns  []string
+		Compression      Compression
+		NoLchown         bool
+		UIDMaps          []idtools.IDMap
+		GIDMaps          []idtools.IDMap
+		ChownOpts        *idtools.Identity
+		IncludeSourceDir bool
+		// WhiteoutFormat is the expected on disk format for whiteout files.
+		// This format will be converted to the standard format on pack
+		// and from the standard format on unpack.
+		WhiteoutFormat WhiteoutFormat
+		// When unpacking, specifies whether overwriting a directory with a
+		// non-directory is allowed and vice versa.
+		NoOverwriteDirNonDir bool
+		// For each include when creating an archive, the included name will be
+		// replaced with the matching name from this map.
+		RebaseNames map[string]string
+		InUserNS    bool
+	}
+)
+
+// Archiver allows the reuse of most utility functions of this package with a
+// pluggable Untar function. Also, to facilitate the passing of specific id
+// mappings for untar, an Archiver can be created with maps which will then be passed to Untar operations.
+type Archiver struct {
+	Untar     func(io.Reader, string, *TarOptions) error
+	IDMapping *idtools.IdentityMapping
+}
+
+// NewDefaultArchiver returns a new Archiver without any IdentityMapping
+func NewDefaultArchiver() *Archiver {
+	return &Archiver{Untar: Untar, IDMapping: &idtools.IdentityMapping{}}
+}
+
+// breakoutError is used to differentiate errors related to breaking out.
+// When testing archive breakout in the unit tests, this error is expected
+// in order for the test to pass.
+type breakoutError error
+
+const (
+	// Uncompressed represents an uncompressed stream.
+	Uncompressed Compression = iota
+	// Bzip2 is the bzip2 compression algorithm.
+	Bzip2
+	// Gzip is the gzip compression algorithm.
+	Gzip
+	// Xz is the xz compression algorithm.
+	Xz
+)
+
+const (
+	// AUFSWhiteoutFormat is the default format for whiteouts
+	AUFSWhiteoutFormat WhiteoutFormat = iota
+	// OverlayWhiteoutFormat formats whiteout according to the overlay
+	// standard.
+ OverlayWhiteoutFormat +) + +const ( + modeISDIR = 040000 // Directory + modeISFIFO = 010000 // FIFO + modeISREG = 0100000 // Regular file + modeISLNK = 0120000 // Symbolic link + modeISBLK = 060000 // Block special file + modeISCHR = 020000 // Character special file + modeISSOCK = 0140000 // Socket +) + +// IsArchivePath checks if the (possibly compressed) file at the given path +// starts with a tar file header. +func IsArchivePath(path string) bool { + file, err := os.Open(path) + if err != nil { + return false + } + defer file.Close() + rdr, err := DecompressStream(file) + if err != nil { + return false + } + defer rdr.Close() + r := tar.NewReader(rdr) + _, err = r.Next() + return err == nil +} + +// DetectCompression detects the compression algorithm of the source. +func DetectCompression(source []byte) Compression { + for compression, m := range map[Compression][]byte{ + Bzip2: {0x42, 0x5A, 0x68}, + Gzip: {0x1F, 0x8B, 0x08}, + Xz: {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, + } { + if len(source) < len(m) { + logrus.Debug("Len too short") + continue + } + if bytes.Equal(m, source[:len(m)]) { + return compression + } + } + return Uncompressed +} + +func xzDecompress(ctx context.Context, archive io.Reader) (io.ReadCloser, error) { + args := []string{"xz", "-d", "-c", "-q"} + + return cmdStream(exec.CommandContext(ctx, args[0], args[1:]...), archive) +} + +func gzDecompress(ctx context.Context, buf io.Reader) (io.ReadCloser, error) { + if unpigzPath == "" { + return gzip.NewReader(buf) + } + + disablePigzEnv := os.Getenv("MOBY_DISABLE_PIGZ") + if disablePigzEnv != "" { + if disablePigz, err := strconv.ParseBool(disablePigzEnv); err != nil { + return nil, err + } else if disablePigz { + return gzip.NewReader(buf) + } + } + + return cmdStream(exec.CommandContext(ctx, unpigzPath, "-d", "-c"), buf) +} + +func wrapReadCloser(readBuf io.ReadCloser, cancel context.CancelFunc) io.ReadCloser { + return ioutils.NewReadCloserWrapper(readBuf, func() error { + cancel() + return readBuf.Close() + }) +} + +// DecompressStream decompresses the archive and returns a ReaderCloser with the decompressed archive. +func DecompressStream(archive io.Reader) (io.ReadCloser, error) { + p := pools.BufioReader32KPool + buf := p.Get(archive) + bs, err := buf.Peek(10) + if err != nil && err != io.EOF { + // Note: we'll ignore any io.EOF error because there are some odd + // cases where the layer.tar file will be empty (zero bytes) and + // that results in an io.EOF from the Peek() call. So, in those + // cases we'll just treat it as a non-compressed stream and + // that means just create an empty layer. 
+ // See Issue 18170 + return nil, err + } + + compression := DetectCompression(bs) + switch compression { + case Uncompressed: + readBufWrapper := p.NewReadCloserWrapper(buf, buf) + return readBufWrapper, nil + case Gzip: + ctx, cancel := context.WithCancel(context.Background()) + + gzReader, err := gzDecompress(ctx, buf) + if err != nil { + cancel() + return nil, err + } + readBufWrapper := p.NewReadCloserWrapper(buf, gzReader) + return wrapReadCloser(readBufWrapper, cancel), nil + case Bzip2: + bz2Reader := bzip2.NewReader(buf) + readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader) + return readBufWrapper, nil + case Xz: + ctx, cancel := context.WithCancel(context.Background()) + + xzReader, err := xzDecompress(ctx, buf) + if err != nil { + cancel() + return nil, err + } + readBufWrapper := p.NewReadCloserWrapper(buf, xzReader) + return wrapReadCloser(readBufWrapper, cancel), nil + default: + return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) + } +} + +// CompressStream compresses the dest with specified compression algorithm. +func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) { + p := pools.BufioWriter32KPool + buf := p.Get(dest) + switch compression { + case Uncompressed: + writeBufWrapper := p.NewWriteCloserWrapper(buf, buf) + return writeBufWrapper, nil + case Gzip: + gzWriter := gzip.NewWriter(dest) + writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter) + return writeBufWrapper, nil + case Bzip2, Xz: + // archive/bzip2 does not support writing, and there is no xz support at all + // However, this is not a problem as docker only currently generates gzipped tars + return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) + default: + return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) + } +} + +// TarModifierFunc is a function that can be passed to ReplaceFileTarWrapper to +// modify the contents or header of an entry in the archive. If the file already +// exists in the archive the TarModifierFunc will be called with the Header and +// a reader which will return the files content. If the file does not exist both +// header and content will be nil. +type TarModifierFunc func(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) + +// ReplaceFileTarWrapper converts inputTarStream to a new tar stream. Files in the +// tar stream are modified if they match any of the keys in mods. 
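+// Modifiers that match no file in the input stream are invoked once at the
+// end with a nil header and reader, which allows new files to be appended.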
+func ReplaceFileTarWrapper(inputTarStream io.ReadCloser, mods map[string]TarModifierFunc) io.ReadCloser { + pipeReader, pipeWriter := io.Pipe() + + go func() { + tarReader := tar.NewReader(inputTarStream) + tarWriter := tar.NewWriter(pipeWriter) + defer inputTarStream.Close() + defer tarWriter.Close() + + modify := func(name string, original *tar.Header, modifier TarModifierFunc, tarReader io.Reader) error { + header, data, err := modifier(name, original, tarReader) + switch { + case err != nil: + return err + case header == nil: + return nil + } + + header.Name = name + header.Size = int64(len(data)) + if err := tarWriter.WriteHeader(header); err != nil { + return err + } + if len(data) != 0 { + if _, err := tarWriter.Write(data); err != nil { + return err + } + } + return nil + } + + var err error + var originalHeader *tar.Header + for { + originalHeader, err = tarReader.Next() + if err == io.EOF { + break + } + if err != nil { + pipeWriter.CloseWithError(err) + return + } + + modifier, ok := mods[originalHeader.Name] + if !ok { + // No modifiers for this file, copy the header and data + if err := tarWriter.WriteHeader(originalHeader); err != nil { + pipeWriter.CloseWithError(err) + return + } + if _, err := pools.Copy(tarWriter, tarReader); err != nil { + pipeWriter.CloseWithError(err) + return + } + continue + } + delete(mods, originalHeader.Name) + + if err := modify(originalHeader.Name, originalHeader, modifier, tarReader); err != nil { + pipeWriter.CloseWithError(err) + return + } + } + + // Apply the modifiers that haven't matched any files in the archive + for name, modifier := range mods { + if err := modify(name, nil, modifier, nil); err != nil { + pipeWriter.CloseWithError(err) + return + } + } + + pipeWriter.Close() + + }() + return pipeReader +} + +// Extension returns the extension of a file that uses the specified compression algorithm. +func (compression *Compression) Extension() string { + switch *compression { + case Uncompressed: + return "tar" + case Bzip2: + return "tar.bz2" + case Gzip: + return "tar.gz" + case Xz: + return "tar.xz" + } + return "" +} + +// FileInfoHeader creates a populated Header from fi. +// Compared to archive pkg this function fills in more information. +// Also, regardless of Go version, this function fills file type bits (e.g. hdr.Mode |= modeISDIR), +// which have been deleted since Go 1.9 archive/tar. 
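+// The returned header uses the PAX format, with the modification time
+// truncated to one-second precision and access/change times cleared.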
+func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, error) { + hdr, err := tar.FileInfoHeader(fi, link) + if err != nil { + return nil, err + } + hdr.Format = tar.FormatPAX + hdr.ModTime = hdr.ModTime.Truncate(time.Second) + hdr.AccessTime = time.Time{} + hdr.ChangeTime = time.Time{} + hdr.Mode = fillGo18FileTypeBits(int64(chmodTarEntry(os.FileMode(hdr.Mode))), fi) + hdr.Name = canonicalTarName(name, fi.IsDir()) + if err := setHeaderForSpecialDevice(hdr, name, fi.Sys()); err != nil { + return nil, err + } + return hdr, nil +} + +// fillGo18FileTypeBits fills type bits which have been removed on Go 1.9 archive/tar +// https://github.com/golang/go/commit/66b5a2f +func fillGo18FileTypeBits(mode int64, fi os.FileInfo) int64 { + fm := fi.Mode() + switch { + case fm.IsRegular(): + mode |= modeISREG + case fi.IsDir(): + mode |= modeISDIR + case fm&os.ModeSymlink != 0: + mode |= modeISLNK + case fm&os.ModeDevice != 0: + if fm&os.ModeCharDevice != 0 { + mode |= modeISCHR + } else { + mode |= modeISBLK + } + case fm&os.ModeNamedPipe != 0: + mode |= modeISFIFO + case fm&os.ModeSocket != 0: + mode |= modeISSOCK + } + return mode +} + +// ReadSecurityXattrToTarHeader reads security.capability xattr from filesystem +// to a tar header +func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error { + capability, _ := system.Lgetxattr(path, "security.capability") + if capability != nil { + hdr.Xattrs = make(map[string]string) + hdr.Xattrs["security.capability"] = string(capability) + } + return nil +} + +type tarWhiteoutConverter interface { + ConvertWrite(*tar.Header, string, os.FileInfo) (*tar.Header, error) + ConvertRead(*tar.Header, string) (bool, error) +} + +type tarAppender struct { + TarWriter *tar.Writer + Buffer *bufio.Writer + + // for hardlink mapping + SeenFiles map[uint64]string + IdentityMapping *idtools.IdentityMapping + ChownOpts *idtools.Identity + + // For packing and unpacking whiteout files in the + // non standard format. The whiteout files defined + // by the AUFS standard are used as the tar whiteout + // standard. + WhiteoutConverter tarWhiteoutConverter +} + +func newTarAppender(idMapping *idtools.IdentityMapping, writer io.Writer, chownOpts *idtools.Identity) *tarAppender { + return &tarAppender{ + SeenFiles: make(map[uint64]string), + TarWriter: tar.NewWriter(writer), + Buffer: pools.BufioWriter32KPool.Get(nil), + IdentityMapping: idMapping, + ChownOpts: chownOpts, + } +} + +// canonicalTarName provides a platform-independent and consistent posix-style +//path for files and directories to be archived regardless of the platform. 
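+// For example, a directory named `dir\sub` on Windows (or `dir/sub` on unix)
+// is recorded in the archive as "dir/sub/".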
+func canonicalTarName(name string, isDir bool) string {
+	name = CanonicalTarNameForPath(name)
+
+	// suffix with '/' for directories
+	if isDir && !strings.HasSuffix(name, "/") {
+		name += "/"
+	}
+	return name
+}
+
+// addTarFile adds to the tar archive a file from `path` as `name`
+func (ta *tarAppender) addTarFile(path, name string) error {
+	fi, err := os.Lstat(path)
+	if err != nil {
+		return err
+	}
+
+	var link string
+	if fi.Mode()&os.ModeSymlink != 0 {
+		var err error
+		link, err = os.Readlink(path)
+		if err != nil {
+			return err
+		}
+	}
+
+	hdr, err := FileInfoHeader(name, fi, link)
+	if err != nil {
+		return err
+	}
+	if err := ReadSecurityXattrToTarHeader(path, hdr); err != nil {
+		return err
+	}
+
+	// if it's not a directory and has more than 1 link,
+	// it's hard linked, so set the type flag accordingly
+	if !fi.IsDir() && hasHardlinks(fi) {
+		inode, err := getInodeFromStat(fi.Sys())
+		if err != nil {
+			return err
+		}
+		// a link should have a name that it links to,
+		// and that linked name should come first in the tar archive
+		if oldpath, ok := ta.SeenFiles[inode]; ok {
+			hdr.Typeflag = tar.TypeLink
+			hdr.Linkname = oldpath
+			hdr.Size = 0 // This must be here for the writer math to add up!
+		} else {
+			ta.SeenFiles[inode] = name
+		}
+	}
+
+	// check whether the file is an overlayfs whiteout
+	// if so, skip re-mapping container ID mappings.
+	isOverlayWhiteout := fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0
+
+	// handle re-mapping container ID mappings back to host ID mappings before
+	// writing tar headers/files. We skip whiteout files because they were written
+	// by the kernel and already have proper ownership relative to the host.
+	if !isOverlayWhiteout && !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && !ta.IdentityMapping.Empty() {
+		fileIDPair, err := getFileUIDGID(fi.Sys())
+		if err != nil {
+			return err
+		}
+		hdr.Uid, hdr.Gid, err = ta.IdentityMapping.ToContainer(fileIDPair)
+		if err != nil {
+			return err
+		}
+	}
+
+	// explicitly override with ChownOpts
+	if ta.ChownOpts != nil {
+		hdr.Uid = ta.ChownOpts.UID
+		hdr.Gid = ta.ChownOpts.GID
+	}
+
+	if ta.WhiteoutConverter != nil {
+		wo, err := ta.WhiteoutConverter.ConvertWrite(hdr, path, fi)
+		if err != nil {
+			return err
+		}
+
+		// If a new whiteout file exists, write original hdr, then
+		// replace hdr with wo to be written after. Whiteouts should
+		// always be written after the original. Note that the original
+		// hdr may itself have been rewritten as a whiteout, in addition
+		// to a whiteout header being returned.
+		if wo != nil {
+			if err := ta.TarWriter.WriteHeader(hdr); err != nil {
+				return err
+			}
+			if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 {
+				return fmt.Errorf("tar: cannot use whiteout for non-empty file")
+			}
+			hdr = wo
+		}
+	}
+
+	if err := ta.TarWriter.WriteHeader(hdr); err != nil {
+		return err
+	}
+
+	if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 {
+		// We use system.OpenSequential to ensure we use sequential file
+		// access on Windows to avoid depleting the standby list.
+		// On Linux, this equates to a regular os.Open.
+		file, err := system.OpenSequential(path)
+		if err != nil {
+			return err
+		}
+
+		ta.Buffer.Reset(ta.TarWriter)
+		defer ta.Buffer.Reset(nil)
+		_, err = io.Copy(ta.Buffer, file)
+		file.Close()
+		if err != nil {
+			return err
+		}
+		err = ta.Buffer.Flush()
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *idtools.Identity, inUserns bool) error {
+	// hdr.Mode is in linux format, which we can use for syscalls,
+	// but for os.Foo() calls we need the mode converted to os.FileMode,
+	// so use hdrInfo.Mode() (they differ for e.g. setuid bits)
+	hdrInfo := hdr.FileInfo()
+
+	switch hdr.Typeflag {
+	case tar.TypeDir:
+		// Create directory unless it exists as a directory already.
+		// In that case we just want to merge the two
+		if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) {
+			if err := os.Mkdir(path, hdrInfo.Mode()); err != nil {
+				return err
+			}
+		}
+
+	case tar.TypeReg, tar.TypeRegA:
+		// Source is a regular file. We use system.OpenFileSequential to use sequential
+		// file access to avoid depleting the standby list on Windows.
+		// On Linux, this equates to a regular os.OpenFile
+		file, err := system.OpenFileSequential(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode())
+		if err != nil {
+			return err
+		}
+		if _, err := io.Copy(file, reader); err != nil {
+			file.Close()
+			return err
+		}
+		file.Close()
+
+	case tar.TypeBlock, tar.TypeChar:
+		if inUserns { // cannot create devices in a userns
+			return nil
+		}
+		// Handle this in an OS-specific way
+		if err := handleTarTypeBlockCharFifo(hdr, path); err != nil {
+			return err
+		}
+
+	case tar.TypeFifo:
+		// Handle this in an OS-specific way
+		if err := handleTarTypeBlockCharFifo(hdr, path); err != nil {
+			return err
+		}
+
+	case tar.TypeLink:
+		targetPath := filepath.Join(extractDir, hdr.Linkname)
+		// check for hardlink breakout
+		if !strings.HasPrefix(targetPath, extractDir) {
+			return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname))
+		}
+		if err := os.Link(targetPath, path); err != nil {
+			return err
+		}
+
+	case tar.TypeSymlink:
+		// path -> hdr.Linkname = targetPath
+		// e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file
+		targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname)
+
+		// the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because
+		// that symlink would first have to be created, which would be caught earlier, at this very check:
+		if !strings.HasPrefix(targetPath, extractDir) {
+			return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname))
+		}
+		if err := os.Symlink(hdr.Linkname, path); err != nil {
+			return err
+		}
+
+	case tar.TypeXGlobalHeader:
+		logrus.Debug("PAX Global Extended Headers found and ignored")
+		return nil
+
+	default:
+		return fmt.Errorf("unhandled tar header type %d", hdr.Typeflag)
+	}
+
+	// Lchown is not supported on Windows.
+	if Lchown && runtime.GOOS != "windows" {
+		if chownOpts == nil {
+			chownOpts = &idtools.Identity{UID: hdr.Uid, GID: hdr.Gid}
+		}
+		if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil {
+			return err
+		}
+	}
+
+	var errors []string
+	for key, value := range hdr.Xattrs {
+		if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil {
+			if err == syscall.ENOTSUP || err == syscall.EPERM {
+				// We ignore errors here because not all graphdrivers support
+				// xattrs *cough* old versions of AUFS *cough*.
However only + // ENOTSUP should be emitted in that case, otherwise we still + // bail. + // EPERM occurs if modifying xattrs is not allowed. This can + // happen when running in userns with restrictions (ChromeOS). + errors = append(errors, err.Error()) + continue + } + return err + } + + } + + if len(errors) > 0 { + logrus.WithFields(logrus.Fields{ + "errors": errors, + }).Warn("ignored xattrs in archive: underlying filesystem doesn't support them") + } + + // There is no LChmod, so ignore mode for symlink. Also, this + // must happen after chown, as that can modify the file mode + if err := handleLChmod(hdr, path, hdrInfo); err != nil { + return err + } + + aTime := hdr.AccessTime + if aTime.Before(hdr.ModTime) { + // Last access time should never be before last modified time. + aTime = hdr.ModTime + } + + // system.Chtimes doesn't support a NOFOLLOW flag atm + if hdr.Typeflag == tar.TypeLink { + if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { + if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { + return err + } + } + } else if hdr.Typeflag != tar.TypeSymlink { + if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { + return err + } + } else { + ts := []syscall.Timespec{timeToTimespec(aTime), timeToTimespec(hdr.ModTime)} + if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform { + return err + } + } + return nil +} + +// Tar creates an archive from the directory at `path`, and returns it as a +// stream of bytes. +func Tar(path string, compression Compression) (io.ReadCloser, error) { + return TarWithOptions(path, &TarOptions{Compression: compression}) +} + +// TarWithOptions creates an archive from the directory at `path`, only including files whose relative +// paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`. +func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) { + + // Fix the source path to work with long path names. This is a no-op + // on platforms other than Windows. + srcPath = fixVolumePathPrefix(srcPath) + + pm, err := fileutils.NewPatternMatcher(options.ExcludePatterns) + if err != nil { + return nil, err + } + + pipeReader, pipeWriter := io.Pipe() + + compressWriter, err := CompressStream(pipeWriter, options.Compression) + if err != nil { + return nil, err + } + + go func() { + ta := newTarAppender( + idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps), + compressWriter, + options.ChownOpts, + ) + ta.WhiteoutConverter = getWhiteoutConverter(options.WhiteoutFormat, options.InUserNS) + + defer func() { + // Make sure to check the error on Close. + if err := ta.TarWriter.Close(); err != nil { + logrus.Errorf("Can't close tar writer: %s", err) + } + if err := compressWriter.Close(); err != nil { + logrus.Errorf("Can't close compress writer: %s", err) + } + if err := pipeWriter.Close(); err != nil { + logrus.Errorf("Can't close pipe writer: %s", err) + } + }() + + // this buffer is needed for the duration of this piped stream + defer pools.BufioWriter32KPool.Put(ta.Buffer) + + // In general we log errors here but ignore them because + // during e.g. a diff operation the container can continue + // mutating the filesystem and we can see transient errors + // from this + + stat, err := os.Lstat(srcPath) + if err != nil { + return + } + + if !stat.IsDir() { + // We can't later join a non-dir with any includes because the + // 'walk' will error if "file/." 
is stat-ed and "file" is not a + // directory. So, we must split the source path and use the + // basename as the include. + if len(options.IncludeFiles) > 0 { + logrus.Warn("Tar: Can't archive a file with includes") + } + + dir, base := SplitPathDirEntry(srcPath) + srcPath = dir + options.IncludeFiles = []string{base} + } + + if len(options.IncludeFiles) == 0 { + options.IncludeFiles = []string{"."} + } + + seen := make(map[string]bool) + + for _, include := range options.IncludeFiles { + rebaseName := options.RebaseNames[include] + + walkRoot := getWalkRoot(srcPath, include) + filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error { + if err != nil { + logrus.Errorf("Tar: Can't stat file %s to tar: %s", srcPath, err) + return nil + } + + relFilePath, err := filepath.Rel(srcPath, filePath) + if err != nil || (!options.IncludeSourceDir && relFilePath == "." && f.IsDir()) { + // Error getting relative path OR we are looking + // at the source directory path. Skip in both situations. + return nil + } + + if options.IncludeSourceDir && include == "." && relFilePath != "." { + relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator)) + } + + skip := false + + // If "include" is an exact match for the current file + // then even if there's an "excludePatterns" pattern that + // matches it, don't skip it. IOW, assume an explicit 'include' + // is asking for that file no matter what - which is true + // for some files, like .dockerignore and Dockerfile (sometimes) + if include != relFilePath { + skip, err = pm.Matches(relFilePath) + if err != nil { + logrus.Errorf("Error matching %s: %v", relFilePath, err) + return err + } + } + + if skip { + // If we want to skip this file and its a directory + // then we should first check to see if there's an + // excludes pattern (e.g. !dir/file) that starts with this + // dir. If so then we can't skip this dir. + + // Its not a dir then so we can just return/skip. + if !f.IsDir() { + return nil + } + + // No exceptions (!...) in patterns so just skip dir + if !pm.Exclusions() { + return filepath.SkipDir + } + + dirSlash := relFilePath + string(filepath.Separator) + + for _, pat := range pm.Patterns() { + if !pat.Exclusion() { + continue + } + if strings.HasPrefix(pat.String()+string(filepath.Separator), dirSlash) { + // found a match - so can't skip this dir + return nil + } + } + + // No matching exclusion dir so just skip dir + return filepath.SkipDir + } + + if seen[relFilePath] { + return nil + } + seen[relFilePath] = true + + // Rename the base resource. + if rebaseName != "" { + var replacement string + if rebaseName != string(filepath.Separator) { + // Special case the root directory to replace with an + // empty string instead so that we don't end up with + // double slashes in the paths. + replacement = rebaseName + } + + relFilePath = strings.Replace(relFilePath, include, replacement, 1) + } + + if err := ta.addTarFile(filePath, relFilePath); err != nil { + logrus.Errorf("Can't add file %s to tar: %s", filePath, err) + // if pipe is broken, stop writing tar stream to it + if err == io.ErrClosedPipe { + return err + } + } + return nil + }) + } + }() + + return pipeReader, nil +} + +// Unpack unpacks the decompressedArchive to dest with options. 
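+// Unpack expects an already-decompressed stream; Untar (below) is the wrapper
+// that auto-detects and strips compression first. A minimal sketch, assuming
+// an uncompressed archive (the file and directory names are illustrative):
+//
+//	f, err := os.Open("layer.tar")
+//	if err == nil {
+//		defer f.Close()
+//		err = Unpack(f, "/tmp/rootfs", &TarOptions{})
+//	}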
+func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error {
+	tr := tar.NewReader(decompressedArchive)
+	trBuf := pools.BufioReader32KPool.Get(nil)
+	defer pools.BufioReader32KPool.Put(trBuf)
+
+	var dirs []*tar.Header
+	idMapping := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps)
+	rootIDs := idMapping.RootPair()
+	whiteoutConverter := getWhiteoutConverter(options.WhiteoutFormat, options.InUserNS)
+
+	// Iterate through the files in the archive.
+loop:
+	for {
+		hdr, err := tr.Next()
+		if err == io.EOF {
+			// end of tar archive
+			break
+		}
+		if err != nil {
+			return err
+		}
+
+		// Normalize name, for safety and for a simple is-root check
+		// This keeps "../" as-is, but normalizes "/../" to "/". Or Windows:
+		// This keeps "..\" as-is, but normalizes "\..\" to "\".
+		hdr.Name = filepath.Clean(hdr.Name)
+
+		for _, exclude := range options.ExcludePatterns {
+			if strings.HasPrefix(hdr.Name, exclude) {
+				continue loop
+			}
+		}
+
+		// After calling filepath.Clean(hdr.Name) above, hdr.Name will now be in
+		// the filepath format for the OS on which the daemon is running. Hence
+		// the check for a slash-suffix MUST be done in an OS-agnostic way.
+		if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
+			// Not the root directory, ensure that the parent directory exists
+			parent := filepath.Dir(hdr.Name)
+			parentPath := filepath.Join(dest, parent)
+			if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
+				err = idtools.MkdirAllAndChownNew(parentPath, 0777, rootIDs)
+				if err != nil {
+					return err
+				}
+			}
+		}
+
+		path := filepath.Join(dest, hdr.Name)
+		rel, err := filepath.Rel(dest, path)
+		if err != nil {
+			return err
+		}
+		if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
+			return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
+		}
+
+		// If path exists we almost always just want to remove and replace it.
+		// The only exception is when it is a directory *and* the file from
+		// the layer is also a directory. Then we want to merge them (i.e.
+		// just apply the metadata from the layer).
+		if fi, err := os.Lstat(path); err == nil {
+			if options.NoOverwriteDirNonDir && fi.IsDir() && hdr.Typeflag != tar.TypeDir {
+				// If NoOverwriteDirNonDir is true then we cannot replace
+				// an existing directory with a non-directory from the archive.
+				return fmt.Errorf("cannot overwrite directory %q with non-directory %q", path, dest)
+			}
+
+			if options.NoOverwriteDirNonDir && !fi.IsDir() && hdr.Typeflag == tar.TypeDir {
+				// If NoOverwriteDirNonDir is true then we cannot replace
+				// an existing non-directory with a directory from the archive.
+				return fmt.Errorf("cannot overwrite non-directory %q with directory %q", path, dest)
+			}
+
+			if fi.IsDir() && hdr.Name == "."
{ + continue + } + + if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { + if err := os.RemoveAll(path); err != nil { + return err + } + } + } + trBuf.Reset(tr) + + if err := remapIDs(idMapping, hdr); err != nil { + return err + } + + if whiteoutConverter != nil { + writeFile, err := whiteoutConverter.ConvertRead(hdr, path) + if err != nil { + return err + } + if !writeFile { + continue + } + } + + if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts, options.InUserNS); err != nil { + return err + } + + // Directory mtimes must be handled at the end to avoid further + // file creation in them to modify the directory mtime + if hdr.Typeflag == tar.TypeDir { + dirs = append(dirs, hdr) + } + } + + for _, hdr := range dirs { + path := filepath.Join(dest, hdr.Name) + + if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { + return err + } + } + return nil +} + +// Untar reads a stream of bytes from `archive`, parses it as a tar archive, +// and unpacks it into the directory at `dest`. +// The archive may be compressed with one of the following algorithms: +// identity (uncompressed), gzip, bzip2, xz. +// FIXME: specify behavior when target path exists vs. doesn't exist. +func Untar(tarArchive io.Reader, dest string, options *TarOptions) error { + return untarHandler(tarArchive, dest, options, true) +} + +// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive, +// and unpacks it into the directory at `dest`. +// The archive must be an uncompressed stream. +func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error { + return untarHandler(tarArchive, dest, options, false) +} + +// Handler for teasing out the automatic decompression +func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decompress bool) error { + if tarArchive == nil { + return fmt.Errorf("Empty archive") + } + dest = filepath.Clean(dest) + if options == nil { + options = &TarOptions{} + } + if options.ExcludePatterns == nil { + options.ExcludePatterns = []string{} + } + + r := tarArchive + if decompress { + decompressedArchive, err := DecompressStream(tarArchive) + if err != nil { + return err + } + defer decompressedArchive.Close() + r = decompressedArchive + } + + return Unpack(r, dest, options) +} + +// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. +// If either Tar or Untar fails, TarUntar aborts and returns the error. +func (archiver *Archiver) TarUntar(src, dst string) error { + logrus.Debugf("TarUntar(%s %s)", src, dst) + archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed}) + if err != nil { + return err + } + defer archive.Close() + options := &TarOptions{ + UIDMaps: archiver.IDMapping.UIDs(), + GIDMaps: archiver.IDMapping.GIDs(), + } + return archiver.Untar(archive, dst, options) +} + +// UntarPath untar a file from path to a destination, src is the source tar file path. +func (archiver *Archiver) UntarPath(src, dst string) error { + archive, err := os.Open(src) + if err != nil { + return err + } + defer archive.Close() + options := &TarOptions{ + UIDMaps: archiver.IDMapping.UIDs(), + GIDMaps: archiver.IDMapping.GIDs(), + } + return archiver.Untar(archive, dst, options) +} + +// CopyWithTar creates a tar archive of filesystem path `src`, and +// unpacks it at filesystem path `dst`. +// The archive is streamed directly with fixed buffering and no +// intermediary disk IO. 
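+//
+// A sketch of a caller, assuming the Archiver's exported Untar and IDMapping
+// fields (paths are illustrative; the zero IdentityMapping copies IDs through
+// unmapped):
+//
+//	a := &Archiver{Untar: Untar, IDMapping: &idtools.IdentityMapping{}}
+//	err := a.CopyWithTar("/src/data", "/dst/data")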
+func (archiver *Archiver) CopyWithTar(src, dst string) error { + srcSt, err := os.Stat(src) + if err != nil { + return err + } + if !srcSt.IsDir() { + return archiver.CopyFileWithTar(src, dst) + } + + // if this Archiver is set up with ID mapping we need to create + // the new destination directory with the remapped root UID/GID pair + // as owner + rootIDs := archiver.IDMapping.RootPair() + // Create dst, copy src's content into it + logrus.Debugf("Creating dest directory: %s", dst) + if err := idtools.MkdirAllAndChownNew(dst, 0755, rootIDs); err != nil { + return err + } + logrus.Debugf("Calling TarUntar(%s, %s)", src, dst) + return archiver.TarUntar(src, dst) +} + +// CopyFileWithTar emulates the behavior of the 'cp' command-line +// for a single file. It copies a regular file from path `src` to +// path `dst`, and preserves all its metadata. +func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { + logrus.Debugf("CopyFileWithTar(%s, %s)", src, dst) + srcSt, err := os.Stat(src) + if err != nil { + return err + } + + if srcSt.IsDir() { + return fmt.Errorf("Can't copy a directory") + } + + // Clean up the trailing slash. This must be done in an operating + // system specific manner. + if dst[len(dst)-1] == os.PathSeparator { + dst = filepath.Join(dst, filepath.Base(src)) + } + // Create the holding directory if necessary + if err := system.MkdirAll(filepath.Dir(dst), 0700, ""); err != nil { + return err + } + + r, w := io.Pipe() + errC := make(chan error, 1) + + go func() { + defer close(errC) + + errC <- func() error { + defer w.Close() + + srcF, err := os.Open(src) + if err != nil { + return err + } + defer srcF.Close() + + hdr, err := tar.FileInfoHeader(srcSt, "") + if err != nil { + return err + } + hdr.Format = tar.FormatPAX + hdr.ModTime = hdr.ModTime.Truncate(time.Second) + hdr.AccessTime = time.Time{} + hdr.ChangeTime = time.Time{} + hdr.Name = filepath.Base(dst) + hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) + + if err := remapIDs(archiver.IDMapping, hdr); err != nil { + return err + } + + tw := tar.NewWriter(w) + defer tw.Close() + if err := tw.WriteHeader(hdr); err != nil { + return err + } + if _, err := io.Copy(tw, srcF); err != nil { + return err + } + return nil + }() + }() + defer func() { + if er := <-errC; err == nil && er != nil { + err = er + } + }() + + err = archiver.Untar(r, filepath.Dir(dst), nil) + if err != nil { + r.CloseWithError(err) + } + return err +} + +// IdentityMapping returns the IdentityMapping of the archiver. +func (archiver *Archiver) IdentityMapping() *idtools.IdentityMapping { + return archiver.IDMapping +} + +func remapIDs(idMapping *idtools.IdentityMapping, hdr *tar.Header) error { + ids, err := idMapping.ToHost(idtools.Identity{UID: hdr.Uid, GID: hdr.Gid}) + hdr.Uid, hdr.Gid = ids.UID, ids.GID + return err +} + +// cmdStream executes a command, and returns its stdout as a stream. +// If the command fails to run or doesn't complete successfully, an error +// will be returned, including anything written on stderr. 
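+//
+// For instance, the external xz decompression used by DecompressStream is
+// invoked roughly like this (a sketch, not the exact call site):
+//
+//	r, err := cmdStream(exec.CommandContext(ctx, "xz", "-d", "-c", "-q"), archive)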
+func cmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, error) { + cmd.Stdin = input + pipeR, pipeW := io.Pipe() + cmd.Stdout = pipeW + var errBuf bytes.Buffer + cmd.Stderr = &errBuf + + // Run the command and return the pipe + if err := cmd.Start(); err != nil { + return nil, err + } + + // Copy stdout to the returned pipe + go func() { + if err := cmd.Wait(); err != nil { + pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String())) + } else { + pipeW.Close() + } + }() + + return pipeR, nil +} + +// NewTempArchive reads the content of src into a temporary file, and returns the contents +// of that file as an archive. The archive can only be read once - as soon as reading completes, +// the file will be deleted. +func NewTempArchive(src io.Reader, dir string) (*TempArchive, error) { + f, err := ioutil.TempFile(dir, "") + if err != nil { + return nil, err + } + if _, err := io.Copy(f, src); err != nil { + return nil, err + } + if _, err := f.Seek(0, 0); err != nil { + return nil, err + } + st, err := f.Stat() + if err != nil { + return nil, err + } + size := st.Size() + return &TempArchive{File: f, Size: size}, nil +} + +// TempArchive is a temporary archive. The archive can only be read once - as soon as reading completes, +// the file will be deleted. +type TempArchive struct { + *os.File + Size int64 // Pre-computed from Stat().Size() as a convenience + read int64 + closed bool +} + +// Close closes the underlying file if it's still open, or does a no-op +// to allow callers to try to close the TempArchive multiple times safely. +func (archive *TempArchive) Close() error { + if archive.closed { + return nil + } + + archive.closed = true + + return archive.File.Close() +} + +func (archive *TempArchive) Read(data []byte) (int, error) { + n, err := archive.File.Read(data) + archive.read += int64(n) + if err != nil || archive.read == archive.Size { + archive.Close() + os.Remove(archive.File.Name()) + } + return n, err +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/archive/archive_linux.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/archive/archive_linux.go new file mode 100644 index 000000000..0601f7b0d --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/archive/archive_linux.go @@ -0,0 +1,261 @@ +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "archive/tar" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strings" + "syscall" + + "github.com/containerd/continuity/fs" + "github.com/docker/docker/pkg/system" + "github.com/pkg/errors" + "golang.org/x/sys/unix" +) + +func getWhiteoutConverter(format WhiteoutFormat, inUserNS bool) tarWhiteoutConverter { + if format == OverlayWhiteoutFormat { + return overlayWhiteoutConverter{inUserNS: inUserNS} + } + return nil +} + +type overlayWhiteoutConverter struct { + inUserNS bool +} + +func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os.FileInfo) (wo *tar.Header, err error) { + // convert whiteouts to AUFS format + if fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 { + // we just rename the file and make it normal + dir, filename := filepath.Split(hdr.Name) + hdr.Name = filepath.Join(dir, WhiteoutPrefix+filename) + hdr.Mode = 0600 + hdr.Typeflag = tar.TypeReg + hdr.Size = 0 + } + + if fi.Mode()&os.ModeDir != 0 { + // convert opaque dirs to AUFS format by writing an empty file with the prefix + opaque, err := system.Lgetxattr(path, "trusted.overlay.opaque") + if err != nil { + return nil, err + } + if 
len(opaque) == 1 && opaque[0] == 'y' { + if hdr.Xattrs != nil { + delete(hdr.Xattrs, "trusted.overlay.opaque") + } + + // create a header for the whiteout file + // it should inherit some properties from the parent, but be a regular file + wo = &tar.Header{ + Typeflag: tar.TypeReg, + Mode: hdr.Mode & int64(os.ModePerm), + Name: filepath.Join(hdr.Name, WhiteoutOpaqueDir), + Size: 0, + Uid: hdr.Uid, + Uname: hdr.Uname, + Gid: hdr.Gid, + Gname: hdr.Gname, + AccessTime: hdr.AccessTime, + ChangeTime: hdr.ChangeTime, + } + } + } + + return +} + +func (c overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool, error) { + base := filepath.Base(path) + dir := filepath.Dir(path) + + // if a directory is marked as opaque by the AUFS special file, we need to translate that to overlay + if base == WhiteoutOpaqueDir { + err := unix.Setxattr(dir, "trusted.overlay.opaque", []byte{'y'}, 0) + if err != nil { + if c.inUserNS { + if err = replaceDirWithOverlayOpaque(dir); err != nil { + return false, errors.Wrapf(err, "replaceDirWithOverlayOpaque(%q) failed", dir) + } + } else { + return false, errors.Wrapf(err, "setxattr(%q, trusted.overlay.opaque=y)", dir) + } + } + // don't write the file itself + return false, err + } + + // if a file was deleted and we are using overlay, we need to create a character device + if strings.HasPrefix(base, WhiteoutPrefix) { + originalBase := base[len(WhiteoutPrefix):] + originalPath := filepath.Join(dir, originalBase) + + if err := unix.Mknod(originalPath, unix.S_IFCHR, 0); err != nil { + if c.inUserNS { + // Ubuntu and a few distros support overlayfs in userns. + // + // Although we can't call mknod directly in userns (at least on bionic kernel 4.15), + // we can still create 0,0 char device using mknodChar0Overlay(). + // + // NOTE: we don't need this hack for the containerd snapshotter+unpack model. + if err := mknodChar0Overlay(originalPath); err != nil { + return false, errors.Wrapf(err, "failed to mknodChar0UserNS(%q)", originalPath) + } + } else { + return false, errors.Wrapf(err, "failed to mknod(%q, S_IFCHR, 0)", originalPath) + } + } + if err := os.Chown(originalPath, hdr.Uid, hdr.Gid); err != nil { + return false, err + } + + // don't write the file itself + return false, nil + } + + return true, nil +} + +// mknodChar0Overlay creates 0,0 char device by mounting overlayfs and unlinking. +// This function can be used for creating 0,0 char device in userns on Ubuntu. 
+// +// Steps: +// * Mkdir lower,upper,merged,work +// * Create lower/dummy +// * Mount overlayfs +// * Unlink merged/dummy +// * Unmount overlayfs +// * Make sure a 0,0 char device is created as upper/dummy +// * Rename upper/dummy to cleansedOriginalPath +func mknodChar0Overlay(cleansedOriginalPath string) error { + dir := filepath.Dir(cleansedOriginalPath) + tmp, err := ioutil.TempDir(dir, "mc0o") + if err != nil { + return errors.Wrapf(err, "failed to create a tmp directory under %s", dir) + } + defer os.RemoveAll(tmp) + lower := filepath.Join(tmp, "l") + upper := filepath.Join(tmp, "u") + work := filepath.Join(tmp, "w") + merged := filepath.Join(tmp, "m") + for _, s := range []string{lower, upper, work, merged} { + if err := os.MkdirAll(s, 0700); err != nil { + return errors.Wrapf(err, "failed to mkdir %s", s) + } + } + dummyBase := "d" + lowerDummy := filepath.Join(lower, dummyBase) + if err := ioutil.WriteFile(lowerDummy, []byte{}, 0600); err != nil { + return errors.Wrapf(err, "failed to create a dummy lower file %s", lowerDummy) + } + mOpts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lower, upper, work) + // docker/pkg/mount.Mount() requires procfs to be mounted. So we use syscall.Mount() directly instead. + if err := syscall.Mount("overlay", merged, "overlay", uintptr(0), mOpts); err != nil { + return errors.Wrapf(err, "failed to mount overlay (%s) on %s", mOpts, merged) + } + mergedDummy := filepath.Join(merged, dummyBase) + if err := os.Remove(mergedDummy); err != nil { + syscall.Unmount(merged, 0) + return errors.Wrapf(err, "failed to unlink %s", mergedDummy) + } + if err := syscall.Unmount(merged, 0); err != nil { + return errors.Wrapf(err, "failed to unmount %s", merged) + } + upperDummy := filepath.Join(upper, dummyBase) + if err := isChar0(upperDummy); err != nil { + return err + } + if err := os.Rename(upperDummy, cleansedOriginalPath); err != nil { + return errors.Wrapf(err, "failed to rename %s to %s", upperDummy, cleansedOriginalPath) + } + return nil +} + +func isChar0(path string) error { + osStat, err := os.Stat(path) + if err != nil { + return errors.Wrapf(err, "failed to stat %s", path) + } + st, ok := osStat.Sys().(*syscall.Stat_t) + if !ok { + return errors.Errorf("got unsupported stat for %s", path) + } + if os.FileMode(st.Mode)&syscall.S_IFMT != syscall.S_IFCHR { + return errors.Errorf("%s is not a character device, got mode=%d", path, st.Mode) + } + if st.Rdev != 0 { + return errors.Errorf("%s is not a 0,0 character device, got Rdev=%d", path, st.Rdev) + } + return nil +} + +// replaceDirWithOverlayOpaque replaces path with a new directory with trusted.overlay.opaque +// xattr. The contents of the directory are preserved. +func replaceDirWithOverlayOpaque(path string) error { + if path == "/" { + return errors.New("replaceDirWithOverlayOpaque: path must not be \"/\"") + } + dir := filepath.Dir(path) + tmp, err := ioutil.TempDir(dir, "rdwoo") + if err != nil { + return errors.Wrapf(err, "failed to create a tmp directory under %s", dir) + } + defer os.RemoveAll(tmp) + // newPath is a new empty directory crafted with trusted.overlay.opaque xattr. + // we copy the content of path into newPath, remove path, and rename newPath to path. 
+ newPath, err := createDirWithOverlayOpaque(tmp) + if err != nil { + return errors.Wrapf(err, "createDirWithOverlayOpaque(%q) failed", tmp) + } + if err := fs.CopyDir(newPath, path); err != nil { + return errors.Wrapf(err, "CopyDir(%q, %q) failed", newPath, path) + } + if err := os.RemoveAll(path); err != nil { + return err + } + return os.Rename(newPath, path) +} + +// createDirWithOverlayOpaque creates a directory with trusted.overlay.opaque xattr, +// without calling setxattr, so as to allow creating opaque dir in userns on Ubuntu. +func createDirWithOverlayOpaque(tmp string) (string, error) { + lower := filepath.Join(tmp, "l") + upper := filepath.Join(tmp, "u") + work := filepath.Join(tmp, "w") + merged := filepath.Join(tmp, "m") + for _, s := range []string{lower, upper, work, merged} { + if err := os.MkdirAll(s, 0700); err != nil { + return "", errors.Wrapf(err, "failed to mkdir %s", s) + } + } + dummyBase := "d" + lowerDummy := filepath.Join(lower, dummyBase) + if err := os.MkdirAll(lowerDummy, 0700); err != nil { + return "", errors.Wrapf(err, "failed to create a dummy lower directory %s", lowerDummy) + } + mOpts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lower, upper, work) + // docker/pkg/mount.Mount() requires procfs to be mounted. So we use syscall.Mount() directly instead. + if err := syscall.Mount("overlay", merged, "overlay", uintptr(0), mOpts); err != nil { + return "", errors.Wrapf(err, "failed to mount overlay (%s) on %s", mOpts, merged) + } + mergedDummy := filepath.Join(merged, dummyBase) + if err := os.Remove(mergedDummy); err != nil { + syscall.Unmount(merged, 0) + return "", errors.Wrapf(err, "failed to rmdir %s", mergedDummy) + } + // upperDummy becomes a 0,0-char device file here + if err := os.Mkdir(mergedDummy, 0700); err != nil { + syscall.Unmount(merged, 0) + return "", errors.Wrapf(err, "failed to mkdir %s", mergedDummy) + } + // upperDummy becomes a directory with trusted.overlay.opaque xattr + // (but can't be verified in userns) + if err := syscall.Unmount(merged, 0); err != nil { + return "", errors.Wrapf(err, "failed to unmount %s", merged) + } + upperDummy := filepath.Join(upper, dummyBase) + return upperDummy, nil +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/archive/archive_other.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/archive/archive_other.go new file mode 100644 index 000000000..65a73354c --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/archive/archive_other.go @@ -0,0 +1,7 @@ +// +build !linux + +package archive // import "github.com/docker/docker/pkg/archive" + +func getWhiteoutConverter(format WhiteoutFormat, inUserNS bool) tarWhiteoutConverter { + return nil +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/archive/archive_unix.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/archive/archive_unix.go new file mode 100644 index 000000000..d62633603 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/archive/archive_unix.go @@ -0,0 +1,115 @@ +// +build !windows + +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "archive/tar" + "errors" + "os" + "path/filepath" + "strings" + "syscall" + + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/system" + rsystem "github.com/opencontainers/runc/libcontainer/system" + "golang.org/x/sys/unix" +) + +// fixVolumePathPrefix does platform specific processing to ensure that if +// the path being passed in is not in a volume path format, 
convert it to one. +func fixVolumePathPrefix(srcPath string) string { + return srcPath +} + +// getWalkRoot calculates the root path when performing a TarWithOptions. +// We use a separate function as this is platform specific. On Linux, we +// can't use filepath.Join(srcPath,include) because this will clean away +// a trailing "." or "/" which may be important. +func getWalkRoot(srcPath string, include string) string { + return strings.TrimSuffix(srcPath, string(filepath.Separator)) + string(filepath.Separator) + include +} + +// CanonicalTarNameForPath returns platform-specific filepath +// to canonical posix-style path for tar archival. p is relative +// path. +func CanonicalTarNameForPath(p string) string { + return p // already unix-style +} + +// chmodTarEntry is used to adjust the file permissions used in tar header based +// on the platform the archival is done. + +func chmodTarEntry(perm os.FileMode) os.FileMode { + return perm // noop for unix as golang APIs provide perm bits correctly +} + +func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) { + s, ok := stat.(*syscall.Stat_t) + + if ok { + // Currently go does not fill in the major/minors + if s.Mode&unix.S_IFBLK != 0 || + s.Mode&unix.S_IFCHR != 0 { + hdr.Devmajor = int64(unix.Major(uint64(s.Rdev))) // nolint: unconvert + hdr.Devminor = int64(unix.Minor(uint64(s.Rdev))) // nolint: unconvert + } + } + + return +} + +func getInodeFromStat(stat interface{}) (inode uint64, err error) { + s, ok := stat.(*syscall.Stat_t) + + if ok { + inode = s.Ino + } + + return +} + +func getFileUIDGID(stat interface{}) (idtools.Identity, error) { + s, ok := stat.(*syscall.Stat_t) + + if !ok { + return idtools.Identity{}, errors.New("cannot convert stat value to syscall.Stat_t") + } + return idtools.Identity{UID: int(s.Uid), GID: int(s.Gid)}, nil +} + +// handleTarTypeBlockCharFifo is an OS-specific helper function used by +// createTarFile to handle the following types of header: Block; Char; Fifo +func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { + if rsystem.RunningInUserNS() { + // cannot create a device if running in user namespace + return nil + } + + mode := uint32(hdr.Mode & 07777) + switch hdr.Typeflag { + case tar.TypeBlock: + mode |= unix.S_IFBLK + case tar.TypeChar: + mode |= unix.S_IFCHR + case tar.TypeFifo: + mode |= unix.S_IFIFO + } + + return system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor))) +} + +func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error { + if hdr.Typeflag == tar.TypeLink { + if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { + if err := os.Chmod(path, hdrInfo.Mode()); err != nil { + return err + } + } + } else if hdr.Typeflag != tar.TypeSymlink { + if err := os.Chmod(path, hdrInfo.Mode()); err != nil { + return err + } + } + return nil +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/archive/archive_windows.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/archive/archive_windows.go new file mode 100644 index 000000000..ae6b89fd7 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/archive/archive_windows.go @@ -0,0 +1,67 @@ +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "archive/tar" + "os" + "path/filepath" + + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/longpath" +) + +// fixVolumePathPrefix does platform specific processing to ensure that if +// the path being passed in is 
not in a volume path format, convert it to one. +func fixVolumePathPrefix(srcPath string) string { + return longpath.AddPrefix(srcPath) +} + +// getWalkRoot calculates the root path when performing a TarWithOptions. +// We use a separate function as this is platform specific. +func getWalkRoot(srcPath string, include string) string { + return filepath.Join(srcPath, include) +} + +// CanonicalTarNameForPath returns platform-specific filepath +// to canonical posix-style path for tar archival. p is relative +// path. +func CanonicalTarNameForPath(p string) string { + return filepath.ToSlash(p) +} + +// chmodTarEntry is used to adjust the file permissions used in tar header based +// on the platform the archival is done. +func chmodTarEntry(perm os.FileMode) os.FileMode { + //perm &= 0755 // this 0-ed out tar flags (like link, regular file, directory marker etc.) + permPart := perm & os.ModePerm + noPermPart := perm &^ os.ModePerm + // Add the x bit: make everything +x from windows + permPart |= 0111 + permPart &= 0755 + + return noPermPart | permPart +} + +func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) { + // do nothing. no notion of Rdev, Nlink in stat on Windows + return +} + +func getInodeFromStat(stat interface{}) (inode uint64, err error) { + // do nothing. no notion of Inode in stat on Windows + return +} + +// handleTarTypeBlockCharFifo is an OS-specific helper function used by +// createTarFile to handle the following types of header: Block; Char; Fifo +func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { + return nil +} + +func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error { + return nil +} + +func getFileUIDGID(stat interface{}) (idtools.Identity, error) { + // no notion of file ownership mapping yet on Windows + return idtools.Identity{UID: 0, GID: 0}, nil +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/archive/changes.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/archive/changes.go new file mode 100644 index 000000000..aedb91b03 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/archive/changes.go @@ -0,0 +1,445 @@ +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "archive/tar" + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "sort" + "strings" + "syscall" + "time" + + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/pools" + "github.com/docker/docker/pkg/system" + "github.com/sirupsen/logrus" +) + +// ChangeType represents the change type. +type ChangeType int + +const ( + // ChangeModify represents the modify operation. + ChangeModify = iota + // ChangeAdd represents the add operation. + ChangeAdd + // ChangeDelete represents the delete operation. + ChangeDelete +) + +func (c ChangeType) String() string { + switch c { + case ChangeModify: + return "C" + case ChangeAdd: + return "A" + case ChangeDelete: + return "D" + } + return "" +} + +// Change represents a change, it wraps the change type and path. +// It describes changes of the files in the path respect to the +// parent layers. The change could be modify, add, delete. +// This is used for layer diff. 
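+// For example, Change{Path: "/etc/hosts", Kind: ChangeModify}.String()
+// renders as "C /etc/hosts".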
+type Change struct { + Path string + Kind ChangeType +} + +func (change *Change) String() string { + return fmt.Sprintf("%s %s", change.Kind, change.Path) +} + +// for sort.Sort +type changesByPath []Change + +func (c changesByPath) Less(i, j int) bool { return c[i].Path < c[j].Path } +func (c changesByPath) Len() int { return len(c) } +func (c changesByPath) Swap(i, j int) { c[j], c[i] = c[i], c[j] } + +// Gnu tar doesn't have sub-second mtime precision. The go tar +// writer (1.10+) does when using PAX format, but we round times to seconds +// to ensure archives have the same hashes for backwards compatibility. +// See https://github.com/moby/moby/pull/35739/commits/fb170206ba12752214630b269a40ac7be6115ed4. +// +// Non-sub-second is problematic when we apply changes via tar +// files. We handle this by comparing for exact times, *or* same +// second count and either a or b having exactly 0 nanoseconds +func sameFsTime(a, b time.Time) bool { + return a.Equal(b) || + (a.Unix() == b.Unix() && + (a.Nanosecond() == 0 || b.Nanosecond() == 0)) +} + +func sameFsTimeSpec(a, b syscall.Timespec) bool { + return a.Sec == b.Sec && + (a.Nsec == b.Nsec || a.Nsec == 0 || b.Nsec == 0) +} + +// Changes walks the path rw and determines changes for the files in the path, +// with respect to the parent layers +func Changes(layers []string, rw string) ([]Change, error) { + return changes(layers, rw, aufsDeletedFile, aufsMetadataSkip) +} + +func aufsMetadataSkip(path string) (skip bool, err error) { + skip, err = filepath.Match(string(os.PathSeparator)+WhiteoutMetaPrefix+"*", path) + if err != nil { + skip = true + } + return +} + +func aufsDeletedFile(root, path string, fi os.FileInfo) (string, error) { + f := filepath.Base(path) + + // If there is a whiteout, then the file was removed + if strings.HasPrefix(f, WhiteoutPrefix) { + originalFile := f[len(WhiteoutPrefix):] + return filepath.Join(filepath.Dir(path), originalFile), nil + } + + return "", nil +} + +type skipChange func(string) (bool, error) +type deleteChange func(string, string, os.FileInfo) (string, error) + +func changes(layers []string, rw string, dc deleteChange, sc skipChange) ([]Change, error) { + var ( + changes []Change + changedDirs = make(map[string]struct{}) + ) + + err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error { + if err != nil { + return err + } + + // Rebase path + path, err = filepath.Rel(rw, path) + if err != nil { + return err + } + + // As this runs on the daemon side, file paths are OS specific. + path = filepath.Join(string(os.PathSeparator), path) + + // Skip root + if path == string(os.PathSeparator) { + return nil + } + + if sc != nil { + if skip, err := sc(path); skip { + return err + } + } + + change := Change{ + Path: path, + } + + deletedFile, err := dc(rw, path, f) + if err != nil { + return err + } + + // Find out what kind of modification happened + if deletedFile != "" { + change.Path = deletedFile + change.Kind = ChangeDelete + } else { + // Otherwise, the file was added + change.Kind = ChangeAdd + + // ...Unless it already existed in a top layer, in which case, it's a modification + for _, layer := range layers { + stat, err := os.Stat(filepath.Join(layer, path)) + if err != nil && !os.IsNotExist(err) { + return err + } + if err == nil { + // The file existed in the top layer, so that's a modification + + // However, if it's a directory, maybe it wasn't actually modified. 
+ // If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar + if stat.IsDir() && f.IsDir() { + if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) { + // Both directories are the same, don't record the change + return nil + } + } + change.Kind = ChangeModify + break + } + } + } + + // If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files. + // This block is here to ensure the change is recorded even if the + // modify time, mode and size of the parent directory in the rw and ro layers are all equal. + // Check https://github.com/docker/docker/pull/13590 for details. + if f.IsDir() { + changedDirs[path] = struct{}{} + } + if change.Kind == ChangeAdd || change.Kind == ChangeDelete { + parent := filepath.Dir(path) + if _, ok := changedDirs[parent]; !ok && parent != "/" { + changes = append(changes, Change{Path: parent, Kind: ChangeModify}) + changedDirs[parent] = struct{}{} + } + } + + // Record change + changes = append(changes, change) + return nil + }) + if err != nil && !os.IsNotExist(err) { + return nil, err + } + return changes, nil +} + +// FileInfo describes the information of a file. +type FileInfo struct { + parent *FileInfo + name string + stat *system.StatT + children map[string]*FileInfo + capability []byte + added bool +} + +// LookUp looks up the file information of a file. +func (info *FileInfo) LookUp(path string) *FileInfo { + // As this runs on the daemon side, file paths are OS specific. + parent := info + if path == string(os.PathSeparator) { + return info + } + + pathElements := strings.Split(path, string(os.PathSeparator)) + for _, elem := range pathElements { + if elem != "" { + child := parent.children[elem] + if child == nil { + return nil + } + parent = child + } + } + return parent +} + +func (info *FileInfo) path() string { + if info.parent == nil { + // As this runs on the daemon side, file paths are OS specific. + return string(os.PathSeparator) + } + return filepath.Join(info.parent.path(), info.name) +} + +func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) { + + sizeAtEntry := len(*changes) + + if oldInfo == nil { + // add + change := Change{ + Path: info.path(), + Kind: ChangeAdd, + } + *changes = append(*changes, change) + info.added = true + } + + // We make a copy so we can modify it to detect additions + // also, we only recurse on the old dir if the new info is a directory + // otherwise any previous delete/change is considered recursive + oldChildren := make(map[string]*FileInfo) + if oldInfo != nil && info.isDir() { + for k, v := range oldInfo.children { + oldChildren[k] = v + } + } + + for name, newChild := range info.children { + oldChild := oldChildren[name] + if oldChild != nil { + // change? + oldStat := oldChild.stat + newStat := newChild.stat + // Note: We can't compare inode or ctime or blocksize here, because these change + // when copying a file into a container. However, that is not generally a problem + // because any content change will change mtime, and any status change should + // be visible when actually comparing the stat fields. 
The only time this + // breaks down is if some code intentionally hides a change by setting + // back mtime + if statDifferent(oldStat, newStat) || + !bytes.Equal(oldChild.capability, newChild.capability) { + change := Change{ + Path: newChild.path(), + Kind: ChangeModify, + } + *changes = append(*changes, change) + newChild.added = true + } + + // Remove from copy so we can detect deletions + delete(oldChildren, name) + } + + newChild.addChanges(oldChild, changes) + } + for _, oldChild := range oldChildren { + // delete + change := Change{ + Path: oldChild.path(), + Kind: ChangeDelete, + } + *changes = append(*changes, change) + } + + // If there were changes inside this directory, we need to add it, even if the directory + // itself wasn't changed. This is needed to properly save and restore filesystem permissions. + // As this runs on the daemon side, file paths are OS specific. + if len(*changes) > sizeAtEntry && info.isDir() && !info.added && info.path() != string(os.PathSeparator) { + change := Change{ + Path: info.path(), + Kind: ChangeModify, + } + // Let's insert the directory entry before the recently added entries located inside this dir + *changes = append(*changes, change) // just to resize the slice, will be overwritten + copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:]) + (*changes)[sizeAtEntry] = change + } + +} + +// Changes add changes to file information. +func (info *FileInfo) Changes(oldInfo *FileInfo) []Change { + var changes []Change + + info.addChanges(oldInfo, &changes) + + return changes +} + +func newRootFileInfo() *FileInfo { + // As this runs on the daemon side, file paths are OS specific. + root := &FileInfo{ + name: string(os.PathSeparator), + children: make(map[string]*FileInfo), + } + return root +} + +// ChangesDirs compares two directories and generates an array of Change objects describing the changes. +// If oldDir is "", then all files in newDir will be Add-Changes. +func ChangesDirs(newDir, oldDir string) ([]Change, error) { + var ( + oldRoot, newRoot *FileInfo + ) + if oldDir == "" { + emptyDir, err := ioutil.TempDir("", "empty") + if err != nil { + return nil, err + } + defer os.Remove(emptyDir) + oldDir = emptyDir + } + oldRoot, newRoot, err := collectFileInfoForChanges(oldDir, newDir) + if err != nil { + return nil, err + } + + return newRoot.Changes(oldRoot), nil +} + +// ChangesSize calculates the size in bytes of the provided changes, based on newDir. +func ChangesSize(newDir string, changes []Change) int64 { + var ( + size int64 + sf = make(map[uint64]struct{}) + ) + for _, change := range changes { + if change.Kind == ChangeModify || change.Kind == ChangeAdd { + file := filepath.Join(newDir, change.Path) + fileInfo, err := os.Lstat(file) + if err != nil { + logrus.Errorf("Can not stat %q: %s", file, err) + continue + } + + if fileInfo != nil && !fileInfo.IsDir() { + if hasHardlinks(fileInfo) { + inode := getIno(fileInfo) + if _, ok := sf[inode]; !ok { + size += fileInfo.Size() + sf[inode] = struct{}{} + } + } else { + size += fileInfo.Size() + } + } + } + } + return size +} + +// ExportChanges produces an Archive from the provided changes, relative to dir. 
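+// A sketch of the diff-then-export flow (the directories are illustrative):
+//
+//	changes, err := ChangesDirs("/var/lib/foo/new", "/var/lib/foo/old")
+//	if err == nil {
+//		layer, _ := ExportChanges("/var/lib/foo/new", changes, nil, nil)
+//		defer layer.Close()
+//	}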
+func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMap) (io.ReadCloser, error) { + reader, writer := io.Pipe() + go func() { + ta := newTarAppender(idtools.NewIDMappingsFromMaps(uidMaps, gidMaps), writer, nil) + + // this buffer is needed for the duration of this piped stream + defer pools.BufioWriter32KPool.Put(ta.Buffer) + + sort.Sort(changesByPath(changes)) + + // In general we log errors here but ignore them because + // during e.g. a diff operation the container can continue + // mutating the filesystem and we can see transient errors + // from this + for _, change := range changes { + if change.Kind == ChangeDelete { + whiteOutDir := filepath.Dir(change.Path) + whiteOutBase := filepath.Base(change.Path) + whiteOut := filepath.Join(whiteOutDir, WhiteoutPrefix+whiteOutBase) + timestamp := time.Now() + hdr := &tar.Header{ + Name: whiteOut[1:], + Size: 0, + ModTime: timestamp, + AccessTime: timestamp, + ChangeTime: timestamp, + } + if err := ta.TarWriter.WriteHeader(hdr); err != nil { + logrus.Debugf("Can't write whiteout header: %s", err) + } + } else { + path := filepath.Join(dir, change.Path) + if err := ta.addTarFile(path, change.Path[1:]); err != nil { + logrus.Debugf("Can't add file %s to tar: %s", path, err) + } + } + } + + // Make sure to check the error on Close. + if err := ta.TarWriter.Close(); err != nil { + logrus.Debugf("Can't close layer: %s", err) + } + if err := writer.Close(); err != nil { + logrus.Debugf("failed close Changes writer: %s", err) + } + }() + return reader, nil +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/archive/changes_linux.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/archive/changes_linux.go new file mode 100644 index 000000000..f8792b3d4 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/archive/changes_linux.go @@ -0,0 +1,286 @@ +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "bytes" + "fmt" + "os" + "path/filepath" + "sort" + "syscall" + "unsafe" + + "github.com/docker/docker/pkg/system" + "golang.org/x/sys/unix" +) + +// walker is used to implement collectFileInfoForChanges on linux. Where this +// method in general returns the entire contents of two directory trees, we +// optimize some FS calls out on linux. In particular, we take advantage of the +// fact that getdents(2) returns the inode of each file in the directory being +// walked, which, when walking two trees in parallel to generate a list of +// changes, can be used to prune subtrees without ever having to lstat(2) them +// directly. Eliminating stat calls in this way can save up to seconds on large +// images. +type walker struct { + dir1 string + dir2 string + root1 *FileInfo + root2 *FileInfo +} + +// collectFileInfoForChanges returns a complete representation of the trees +// rooted at dir1 and dir2, with one important exception: any subtree or +// leaf where the inode and device numbers are an exact match between dir1 +// and dir2 will be pruned from the results. This method is *only* to be used +// to generating a list of changes between the two directories, as it does not +// reflect the full contents. 
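+//
+// For example, if "a/b" has identical device and inode numbers under both
+// dir1 and dir2, the entire subtree below it is omitted from both results
+// without any further stat calls.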
+func collectFileInfoForChanges(dir1, dir2 string) (*FileInfo, *FileInfo, error) { + w := &walker{ + dir1: dir1, + dir2: dir2, + root1: newRootFileInfo(), + root2: newRootFileInfo(), + } + + i1, err := os.Lstat(w.dir1) + if err != nil { + return nil, nil, err + } + i2, err := os.Lstat(w.dir2) + if err != nil { + return nil, nil, err + } + + if err := w.walk("/", i1, i2); err != nil { + return nil, nil, err + } + + return w.root1, w.root2, nil +} + +// Given a FileInfo, its path info, and a reference to the root of the tree +// being constructed, register this file with the tree. +func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error { + if fi == nil { + return nil + } + parent := root.LookUp(filepath.Dir(path)) + if parent == nil { + return fmt.Errorf("walkchunk: Unexpectedly no parent for %s", path) + } + info := &FileInfo{ + name: filepath.Base(path), + children: make(map[string]*FileInfo), + parent: parent, + } + cpath := filepath.Join(dir, path) + stat, err := system.FromStatT(fi.Sys().(*syscall.Stat_t)) + if err != nil { + return err + } + info.stat = stat + info.capability, _ = system.Lgetxattr(cpath, "security.capability") // lgetxattr(2): fs access + parent.children[info.name] = info + return nil +} + +// Walk a subtree rooted at the same path in both trees being iterated. For +// example, /docker/overlay/1234/a/b/c/d and /docker/overlay/8888/a/b/c/d +func (w *walker) walk(path string, i1, i2 os.FileInfo) (err error) { + // Register these nodes with the return trees, unless we're still at the + // (already-created) roots: + if path != "/" { + if err := walkchunk(path, i1, w.dir1, w.root1); err != nil { + return err + } + if err := walkchunk(path, i2, w.dir2, w.root2); err != nil { + return err + } + } + + is1Dir := i1 != nil && i1.IsDir() + is2Dir := i2 != nil && i2.IsDir() + + sameDevice := false + if i1 != nil && i2 != nil { + si1 := i1.Sys().(*syscall.Stat_t) + si2 := i2.Sys().(*syscall.Stat_t) + if si1.Dev == si2.Dev { + sameDevice = true + } + } + + // If these files are both non-existent, or leaves (non-dirs), we are done. + if !is1Dir && !is2Dir { + return nil + } + + // Fetch the names of all the files contained in both directories being walked: + var names1, names2 []nameIno + if is1Dir { + names1, err = readdirnames(filepath.Join(w.dir1, path)) // getdents(2): fs access + if err != nil { + return err + } + } + if is2Dir { + names2, err = readdirnames(filepath.Join(w.dir2, path)) // getdents(2): fs access + if err != nil { + return err + } + } + + // We have lists of the files contained in both parallel directories, sorted + // in the same order. Walk them in parallel, generating a unique merged list + // of all items present in either or both directories. 
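+	// Names whose inode matches in both directories on the same device are
+	// omitted from the merged list; that is the pruning described above.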
+ var names []string + ix1 := 0 + ix2 := 0 + + for { + if ix1 >= len(names1) { + break + } + if ix2 >= len(names2) { + break + } + + ni1 := names1[ix1] + ni2 := names2[ix2] + + switch bytes.Compare([]byte(ni1.name), []byte(ni2.name)) { + case -1: // ni1 < ni2 -- advance ni1 + // we will not encounter ni1 in names2 + names = append(names, ni1.name) + ix1++ + case 0: // ni1 == ni2 + if ni1.ino != ni2.ino || !sameDevice { + names = append(names, ni1.name) + } + ix1++ + ix2++ + case 1: // ni1 > ni2 -- advance ni2 + // we will not encounter ni2 in names1 + names = append(names, ni2.name) + ix2++ + } + } + for ix1 < len(names1) { + names = append(names, names1[ix1].name) + ix1++ + } + for ix2 < len(names2) { + names = append(names, names2[ix2].name) + ix2++ + } + + // For each of the names present in either or both of the directories being + // iterated, stat the name under each root, and recurse the pair of them: + for _, name := range names { + fname := filepath.Join(path, name) + var cInfo1, cInfo2 os.FileInfo + if is1Dir { + cInfo1, err = os.Lstat(filepath.Join(w.dir1, fname)) // lstat(2): fs access + if err != nil && !os.IsNotExist(err) { + return err + } + } + if is2Dir { + cInfo2, err = os.Lstat(filepath.Join(w.dir2, fname)) // lstat(2): fs access + if err != nil && !os.IsNotExist(err) { + return err + } + } + if err = w.walk(fname, cInfo1, cInfo2); err != nil { + return err + } + } + return nil +} + +// {name,inode} pairs used to support the early-pruning logic of the walker type +type nameIno struct { + name string + ino uint64 +} + +type nameInoSlice []nameIno + +func (s nameInoSlice) Len() int { return len(s) } +func (s nameInoSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s nameInoSlice) Less(i, j int) bool { return s[i].name < s[j].name } + +// readdirnames is a hacked-apart version of the Go stdlib code, exposing inode +// numbers further up the stack when reading directory contents. Unlike +// os.Readdirnames, which returns a list of filenames, this function returns a +// list of {filename,inode} pairs. +func readdirnames(dirname string) (names []nameIno, err error) { + var ( + size = 100 + buf = make([]byte, 4096) + nbuf int + bufp int + nb int + ) + + f, err := os.Open(dirname) + if err != nil { + return nil, err + } + defer f.Close() + + names = make([]nameIno, 0, size) // Empty with room to grow. + for { + // Refill the buffer if necessary + if bufp >= nbuf { + bufp = 0 + nbuf, err = unix.ReadDirent(int(f.Fd()), buf) // getdents on linux + if nbuf < 0 { + nbuf = 0 + } + if err != nil { + return nil, os.NewSyscallError("readdirent", err) + } + if nbuf <= 0 { + break // EOF + } + } + + // Drain the buffer + nb, names = parseDirent(buf[bufp:nbuf], names) + bufp += nb + } + + sl := nameInoSlice(names) + sort.Sort(sl) + return sl, nil +} + +// parseDirent is a minor modification of unix.ParseDirent (linux version) +// which returns {name,inode} pairs instead of just names. +func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno) { + origlen := len(buf) + for len(buf) > 0 { + dirent := (*unix.Dirent)(unsafe.Pointer(&buf[0])) + buf = buf[dirent.Reclen:] + if dirent.Ino == 0 { // File absent in directory. + continue + } + bytes := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0])) + var name = string(bytes[0:clen(bytes[:])]) + if name == "." || name == ".." 
{ // Useless names + continue + } + names = append(names, nameIno{name, dirent.Ino}) + } + return origlen - len(buf), names +} + +func clen(n []byte) int { + for i := 0; i < len(n); i++ { + if n[i] == 0 { + return i + } + } + return len(n) +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/archive/changes_other.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/archive/changes_other.go new file mode 100644 index 000000000..ba744741c --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/archive/changes_other.go @@ -0,0 +1,97 @@ +// +build !linux + +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "fmt" + "os" + "path/filepath" + "runtime" + "strings" + + "github.com/docker/docker/pkg/system" +) + +func collectFileInfoForChanges(oldDir, newDir string) (*FileInfo, *FileInfo, error) { + var ( + oldRoot, newRoot *FileInfo + err1, err2 error + errs = make(chan error, 2) + ) + go func() { + oldRoot, err1 = collectFileInfo(oldDir) + errs <- err1 + }() + go func() { + newRoot, err2 = collectFileInfo(newDir) + errs <- err2 + }() + + // block until both routines have returned + for i := 0; i < 2; i++ { + if err := <-errs; err != nil { + return nil, nil, err + } + } + + return oldRoot, newRoot, nil +} + +func collectFileInfo(sourceDir string) (*FileInfo, error) { + root := newRootFileInfo() + + err := filepath.Walk(sourceDir, func(path string, f os.FileInfo, err error) error { + if err != nil { + return err + } + + // Rebase path + relPath, err := filepath.Rel(sourceDir, path) + if err != nil { + return err + } + + // As this runs on the daemon side, file paths are OS specific. + relPath = filepath.Join(string(os.PathSeparator), relPath) + + // See https://github.com/golang/go/issues/9168 - bug in filepath.Join. + // Temporary workaround. If the returned path starts with two backslashes, + // trim it down to a single backslash. Only relevant on Windows. 
+		if runtime.GOOS == "windows" {
+			if strings.HasPrefix(relPath, `\\`) {
+				relPath = relPath[1:]
+			}
+		}
+
+		if relPath == string(os.PathSeparator) {
+			return nil
+		}
+
+		parent := root.LookUp(filepath.Dir(relPath))
+		if parent == nil {
+			return fmt.Errorf("collectFileInfo: Unexpectedly no parent for %s", relPath)
+		}
+
+		info := &FileInfo{
+			name:     filepath.Base(relPath),
+			children: make(map[string]*FileInfo),
+			parent:   parent,
+		}
+
+		s, err := system.Lstat(path)
+		if err != nil {
+			return err
+		}
+		info.stat = s
+
+		info.capability, _ = system.Lgetxattr(path, "security.capability")
+
+		parent.children[info.name] = info
+
+		return nil
+	})
+	if err != nil {
+		return nil, err
+	}
+	return root, nil
+}
diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/archive/changes_unix.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/archive/changes_unix.go
new file mode 100644
index 000000000..06217b716
--- /dev/null
+++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/archive/changes_unix.go
@@ -0,0 +1,43 @@
+// +build !windows
+
+package archive // import "github.com/docker/docker/pkg/archive"
+
+import (
+	"os"
+	"syscall"
+
+	"github.com/docker/docker/pkg/system"
+	"golang.org/x/sys/unix"
+)
+
+func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool {
+	// Don't look at size for dirs, it's not a good measure of change
+	if oldStat.Mode() != newStat.Mode() ||
+		oldStat.UID() != newStat.UID() ||
+		oldStat.GID() != newStat.GID() ||
+		oldStat.Rdev() != newStat.Rdev() ||
+		// Don't look at size or modification time for dirs, it's not a good
+		// measure of change. See https://github.com/moby/moby/issues/9874
+		// for a description of the issue with modification time, and
+		// https://github.com/moby/moby/pull/11422 for the change.
+		// (Note that in the Windows implementation of this function,
+		// modification time IS taken as a change). See
+		// https://github.com/moby/moby/pull/37982 for more information.
+		(oldStat.Mode()&unix.S_IFDIR != unix.S_IFDIR &&
+			(!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || (oldStat.Size() != newStat.Size()))) {
+		return true
+	}
+	return false
+}
+
+func (info *FileInfo) isDir() bool {
+	return info.parent == nil || info.stat.Mode()&unix.S_IFDIR != 0
+}
+
+func getIno(fi os.FileInfo) uint64 {
+	return fi.Sys().(*syscall.Stat_t).Ino
+}
+
+func hasHardlinks(fi os.FileInfo) bool {
+	return fi.Sys().(*syscall.Stat_t).Nlink > 1
+}
diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/archive/changes_windows.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/archive/changes_windows.go
new file mode 100644
index 000000000..9906685e4
--- /dev/null
+++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/archive/changes_windows.go
@@ -0,0 +1,34 @@
+package archive // import "github.com/docker/docker/pkg/archive"
+
+import (
+	"os"
+
+	"github.com/docker/docker/pkg/system"
+)
+
+func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool {
+	// Note there is slight difference between the Linux and Windows
+	// implementations here. Due to https://github.com/moby/moby/issues/9874,
+	// and the fix at https://github.com/moby/moby/pull/11422, Linux does not
+	// consider a change to the directory time as a change. Windows on NTFS
+	// does. See https://github.com/moby/moby/pull/37982 for more information.
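+	// Note that in the expression below Go's && binds tighter than ||, so
+	// the size comparison is only applied to non-directories.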
+ + if !sameFsTime(oldStat.Mtim(), newStat.Mtim()) || + oldStat.Mode() != newStat.Mode() || + oldStat.Size() != newStat.Size() && !oldStat.Mode().IsDir() { + return true + } + return false +} + +func (info *FileInfo) isDir() bool { + return info.parent == nil || info.stat.Mode().IsDir() +} + +func getIno(fi os.FileInfo) (inode uint64) { + return +} + +func hasHardlinks(fi os.FileInfo) bool { + return false +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/archive/copy.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/archive/copy.go new file mode 100644 index 000000000..57fddac07 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/archive/copy.go @@ -0,0 +1,480 @@ +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "archive/tar" + "errors" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + + "github.com/docker/docker/pkg/system" + "github.com/sirupsen/logrus" +) + +// Errors used or returned by this file. +var ( + ErrNotDirectory = errors.New("not a directory") + ErrDirNotExists = errors.New("no such directory") + ErrCannotCopyDir = errors.New("cannot copy directory") + ErrInvalidCopySource = errors.New("invalid copy source content") +) + +// PreserveTrailingDotOrSeparator returns the given cleaned path (after +// processing using any utility functions from the path or filepath stdlib +// packages) and appends a trailing `/.` or `/` if its corresponding original +// path (from before being processed by utility functions from the path or +// filepath stdlib packages) ends with a trailing `/.` or `/`. If the cleaned +// path already ends in a `.` path segment, then another is not added. If the +// clean path already ends in the separator, then another is not added. +func PreserveTrailingDotOrSeparator(cleanedPath string, originalPath string, sep byte) string { + // Ensure paths are in platform semantics + cleanedPath = strings.Replace(cleanedPath, "/", string(sep), -1) + originalPath = strings.Replace(originalPath, "/", string(sep), -1) + + if !specifiesCurrentDir(cleanedPath) && specifiesCurrentDir(originalPath) { + if !hasTrailingPathSeparator(cleanedPath, sep) { + // Add a separator if it doesn't already end with one (a cleaned + // path would only end in a separator if it is the root). + cleanedPath += string(sep) + } + cleanedPath += "." + } + + if !hasTrailingPathSeparator(cleanedPath, sep) && hasTrailingPathSeparator(originalPath, sep) { + cleanedPath += string(sep) + } + + return cleanedPath +} + +// assertsDirectory returns whether the given path is +// asserted to be a directory, i.e., the path ends with +// a trailing '/' or `/.`, assuming a path separator of `/`. +func assertsDirectory(path string, sep byte) bool { + return hasTrailingPathSeparator(path, sep) || specifiesCurrentDir(path) +} + +// hasTrailingPathSeparator returns whether the given +// path ends with the system's path separator character. +func hasTrailingPathSeparator(path string, sep byte) bool { + return len(path) > 0 && path[len(path)-1] == sep +} + +// specifiesCurrentDir returns whether the given path specifies +// a "current directory", i.e., the last path segment is `.`. +func specifiesCurrentDir(path string) bool { + return filepath.Base(path) == "." +} + +// SplitPathDirEntry splits the given path between its directory name and its +// basename by first cleaning the path but preserves a trailing "." if the +// original path specified the current directory. 
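+//
+// For example, on Unix, SplitPathDirEntry("/usr/bin/.") returns
+// ("/usr/bin", ".").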
+func SplitPathDirEntry(path string) (dir, base string) {
+	cleanedPath := filepath.Clean(filepath.FromSlash(path))
+
+	if specifiesCurrentDir(path) {
+		cleanedPath += string(os.PathSeparator) + "."
+	}
+
+	return filepath.Dir(cleanedPath), filepath.Base(cleanedPath)
+}
+
+// TarResource archives the resource described by the given CopyInfo to a Tar
+// archive. A non-nil error is returned if sourcePath does not exist or is
+// asserted to be a directory but exists as another type of file.
+//
+// This function acts as a convenient wrapper around TarWithOptions, which
+// requires a directory as the source path. TarResource accepts either a
+// directory or a file path and correctly sets the Tar options.
+func TarResource(sourceInfo CopyInfo) (content io.ReadCloser, err error) {
+	return TarResourceRebase(sourceInfo.Path, sourceInfo.RebaseName)
+}
+
+// TarResourceRebase is like TarResource but renames the first path element of
+// items in the resulting tar archive to match the given rebaseName if not "".
+func TarResourceRebase(sourcePath, rebaseName string) (content io.ReadCloser, err error) {
+	sourcePath = normalizePath(sourcePath)
+	if _, err = os.Lstat(sourcePath); err != nil {
+		// Catches the case where the source does not exist or is not a
+		// directory if asserted to be a directory, as this also causes an
+		// error.
+		return
+	}
+
+	// Separate the source path between its directory and
+	// the entry in that directory which we are archiving.
+	sourceDir, sourceBase := SplitPathDirEntry(sourcePath)
+	opts := TarResourceRebaseOpts(sourceBase, rebaseName)
+
+	logrus.Debugf("copying %q from %q", sourceBase, sourceDir)
+	return TarWithOptions(sourceDir, opts)
+}
+
+// TarResourceRebaseOpts does not perform the Tar, but instead just creates the rebase
+// parameters to be sent to TarWithOptions (the TarOptions struct)
+func TarResourceRebaseOpts(sourceBase string, rebaseName string) *TarOptions {
+	filter := []string{sourceBase}
+	return &TarOptions{
+		Compression:      Uncompressed,
+		IncludeFiles:     filter,
+		IncludeSourceDir: true,
+		RebaseNames: map[string]string{
+			sourceBase: rebaseName,
+		},
+	}
+}
+
+// CopyInfo holds basic info about the source
+// or destination path of a copy operation.
+type CopyInfo struct {
+	Path       string
+	Exists     bool
+	IsDir      bool
+	RebaseName string
+}
+
+// CopyInfoSourcePath stats the given path to create a CopyInfo
+// struct representing that resource for the source of an archive copy
+// operation. The given path should be an absolute local path. A source path
+// has all symlinks evaluated that appear before the last path separator ("/"
+// on Unix). As it is to be a copy source, the path must exist.
+func CopyInfoSourcePath(path string, followLink bool) (CopyInfo, error) {
+	// normalize the file path and then evaluate the symlink;
+	// we will use the target file instead of the symlink if
+	// followLink is set
+	path = normalizePath(path)
+
+	resolvedPath, rebaseName, err := ResolveHostSourcePath(path, followLink)
+	if err != nil {
+		return CopyInfo{}, err
+	}
+
+	stat, err := os.Lstat(resolvedPath)
+	if err != nil {
+		return CopyInfo{}, err
+	}
+
+	return CopyInfo{
+		Path:       resolvedPath,
+		Exists:     true,
+		IsDir:      stat.IsDir(),
+		RebaseName: rebaseName,
+	}, nil
+}
+
+// CopyInfoDestinationPath stats the given path to create a CopyInfo
+// struct representing that resource for the destination of an archive copy
+// operation. The given path should be an absolute local path.
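+// Unlike CopyInfoSourcePath, symlinks in the last path element are followed
+// here (up to a fixed iteration limit), since the destination of a copy is
+// the link target rather than the link itself.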
+func CopyInfoDestinationPath(path string) (info CopyInfo, err error) { + maxSymlinkIter := 10 // filepath.EvalSymlinks uses 255, but 10 already seems like a lot. + path = normalizePath(path) + originalPath := path + + stat, err := os.Lstat(path) + + if err == nil && stat.Mode()&os.ModeSymlink == 0 { + // The path exists and is not a symlink. + return CopyInfo{ + Path: path, + Exists: true, + IsDir: stat.IsDir(), + }, nil + } + + // While the path is a symlink. + for n := 0; err == nil && stat.Mode()&os.ModeSymlink != 0; n++ { + if n > maxSymlinkIter { + // Don't follow symlinks more than this arbitrary number of times. + return CopyInfo{}, errors.New("too many symlinks in " + originalPath) + } + + // The path is a symbolic link. We need to evaluate it so that the + // destination of the copy operation is the link target and not the + // link itself. This is notably different than CopyInfoSourcePath which + // only evaluates symlinks before the last appearing path separator. + // Also note that it is okay if the last path element is a broken + // symlink as the copy operation should create the target. + var linkTarget string + + linkTarget, err = os.Readlink(path) + if err != nil { + return CopyInfo{}, err + } + + if !system.IsAbs(linkTarget) { + // Join with the parent directory. + dstParent, _ := SplitPathDirEntry(path) + linkTarget = filepath.Join(dstParent, linkTarget) + } + + path = linkTarget + stat, err = os.Lstat(path) + } + + if err != nil { + // It's okay if the destination path doesn't exist. We can still + // continue the copy operation if the parent directory exists. + if !os.IsNotExist(err) { + return CopyInfo{}, err + } + + // Ensure destination parent dir exists. + dstParent, _ := SplitPathDirEntry(path) + + parentDirStat, err := os.Stat(dstParent) + if err != nil { + return CopyInfo{}, err + } + if !parentDirStat.IsDir() { + return CopyInfo{}, ErrNotDirectory + } + + return CopyInfo{Path: path}, nil + } + + // The path exists after resolving symlinks. + return CopyInfo{ + Path: path, + Exists: true, + IsDir: stat.IsDir(), + }, nil +} + +// PrepareArchiveCopy prepares the given srcContent archive, which should +// contain the archived resource described by srcInfo, to the destination +// described by dstInfo. Returns the possibly modified content archive along +// with the path to the destination directory which it should be extracted to. +func PrepareArchiveCopy(srcContent io.Reader, srcInfo, dstInfo CopyInfo) (dstDir string, content io.ReadCloser, err error) { + // Ensure in platform semantics + srcInfo.Path = normalizePath(srcInfo.Path) + dstInfo.Path = normalizePath(dstInfo.Path) + + // Separate the destination path between its directory and base + // components in case the source archive contents need to be rebased. + dstDir, dstBase := SplitPathDirEntry(dstInfo.Path) + _, srcBase := SplitPathDirEntry(srcInfo.Path) + + switch { + case dstInfo.Exists && dstInfo.IsDir: + // The destination exists as a directory. No alteration + // to srcContent is needed as its contents can be + // simply extracted to the destination directory. + return dstInfo.Path, ioutil.NopCloser(srcContent), nil + case dstInfo.Exists && srcInfo.IsDir: + // The destination exists as some type of file and the source + // content is a directory. This is an error condition since + // you cannot copy a directory to an existing file location. + return "", nil, ErrCannotCopyDir + case dstInfo.Exists: + // The destination exists as some type of file and the source content + // is also a file. 
The source content entry will have to be renamed to
+		// have a basename which matches the destination path's basename.
+		if len(srcInfo.RebaseName) != 0 {
+			srcBase = srcInfo.RebaseName
+		}
+		return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
+	case srcInfo.IsDir:
+		// The destination does not exist and the source content is an archive
+		// of a directory. The archive should be extracted to the parent of
+		// the destination path instead, and when it is, the directory that is
+		// created as a result should take the name of the destination path.
+		// The source content entries will have to be renamed to have a
+		// basename which matches the destination path's basename.
+		if len(srcInfo.RebaseName) != 0 {
+			srcBase = srcInfo.RebaseName
+		}
+		return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
+	case assertsDirectory(dstInfo.Path, os.PathSeparator):
+		// The destination does not exist and is asserted to be created as a
+		// directory, but the source content is not a directory. This is an
+		// error condition since you cannot create a directory from a file
+		// source.
+		return "", nil, ErrDirNotExists
+	default:
+		// The last remaining case is when the destination does not exist, is
+		// not asserted to be a directory, and the source content is not an
+		// archive of a directory. In this case, the destination file will need
+		// to be created when the archive is extracted and the source content
+		// entry will have to be renamed to have a basename which matches the
+		// destination path's basename.
+		if len(srcInfo.RebaseName) != 0 {
+			srcBase = srcInfo.RebaseName
+		}
+		return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
+	}
+
+}
+
+// RebaseArchiveEntries rewrites the given srcContent archive replacing
+// an occurrence of oldBase with newBase at the beginning of entry names.
+func RebaseArchiveEntries(srcContent io.Reader, oldBase, newBase string) io.ReadCloser {
+	if oldBase == string(os.PathSeparator) {
+		// If oldBase specifies the root directory, use an empty string as
+		// oldBase instead so that newBase doesn't replace the path separator
+		// that all paths will start with.
+		oldBase = ""
+	}
+
+	rebased, w := io.Pipe()
+
+	go func() {
+		srcTar := tar.NewReader(srcContent)
+		rebasedTar := tar.NewWriter(w)
+
+		for {
+			hdr, err := srcTar.Next()
+			if err == io.EOF {
+				// Signals end of archive.
+				rebasedTar.Close()
+				w.Close()
+				return
+			}
+			if err != nil {
+				w.CloseWithError(err)
+				return
+			}
+
+			// srcContent tar stream, as served by TarWithOptions(), is
+			// definitely in PAX format, but tar.Next() mistakenly guesses it
+			// as USTAR, which creates a problem: if the newBase is >100
+			// characters long, WriteHeader() returns an error like
+			// "archive/tar: cannot encode header: Format specifies USTAR; and USTAR cannot encode Name=...".
+			//
+			// To fix, set the format to PAX here. See docker/for-linux issue #484.
+			hdr.Format = tar.FormatPAX
+			hdr.Name = strings.Replace(hdr.Name, oldBase, newBase, 1)
+			if hdr.Typeflag == tar.TypeLink {
+				hdr.Linkname = strings.Replace(hdr.Linkname, oldBase, newBase, 1)
+			}
+
+			if err = rebasedTar.WriteHeader(hdr); err != nil {
+				w.CloseWithError(err)
+				return
+			}
+
+			if _, err = io.Copy(rebasedTar, srcTar); err != nil {
+				w.CloseWithError(err)
+				return
+			}
+		}
+	}()
+
+	return rebased
+}
+
+// TODO @gupta-ak. These might have to be changed in the future to be
+// continuity driver aware as well to support LCOW.
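As a minimal sketch of the rebase behaviour implemented above (it uses only the two exported helpers vendored in this package, Generate from wrap.go further below and RebaseArchiveEntries, plus a made-up entry name "old/a.txt"):

package main

import (
	"archive/tar"
	"fmt"
	"io"
	"log"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	// Build an in-memory tar with a single entry named "old/a.txt".
	src, err := archive.Generate("old/a.txt", "hello")
	if err != nil {
		log.Fatal(err)
	}

	// Rewrite the leading "old" path element to "new".
	rebased := archive.RebaseArchiveEntries(src, "old", "new")
	defer rebased.Close()

	// List the rebased entry names.
	tr := tar.NewReader(rebased)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(hdr.Name) // prints "new/a.txt"
	}
}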
+
+// CopyResource performs an archive copy from the given source path to the
+// given destination path. The source path MUST exist and the destination
+// path's parent directory must exist.
+func CopyResource(srcPath, dstPath string, followLink bool) error {
+	var (
+		srcInfo CopyInfo
+		err     error
+	)
+
+	// Ensure in platform semantics
+	srcPath = normalizePath(srcPath)
+	dstPath = normalizePath(dstPath)
+
+	// Clean the source and destination paths.
+	srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath, os.PathSeparator)
+	dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath, os.PathSeparator)
+
+	if srcInfo, err = CopyInfoSourcePath(srcPath, followLink); err != nil {
+		return err
+	}
+
+	content, err := TarResource(srcInfo)
+	if err != nil {
+		return err
+	}
+	defer content.Close()
+
+	return CopyTo(content, srcInfo, dstPath)
+}
+
+// CopyTo handles extracting the given content whose
+// entries should be sourced from srcInfo to dstPath.
+func CopyTo(content io.Reader, srcInfo CopyInfo, dstPath string) error {
+	// The destination path need not exist, but CopyInfoDestinationPath will
+	// ensure that at least the parent directory exists.
+	dstInfo, err := CopyInfoDestinationPath(normalizePath(dstPath))
+	if err != nil {
+		return err
+	}
+
+	dstDir, copyArchive, err := PrepareArchiveCopy(content, srcInfo, dstInfo)
+	if err != nil {
+		return err
+	}
+	defer copyArchive.Close()
+
+	options := &TarOptions{
+		NoLchown:             true,
+		NoOverwriteDirNonDir: true,
+	}
+
+	return Untar(copyArchive, dstDir, options)
+}
+
+// ResolveHostSourcePath decides which real path needs to be copied, based on
+// whether symbolic links should be followed: if followLink is true,
+// resolvedPath is the link target of any symbolic link file; otherwise only
+// symlinks in the parent directory are resolved, and a symbolic link file
+// itself is returned without being resolved.
+func ResolveHostSourcePath(path string, followLink bool) (resolvedPath, rebaseName string, err error) {
+	if followLink {
+		resolvedPath, err = filepath.EvalSymlinks(path)
+		if err != nil {
+			return
+		}
+
+		resolvedPath, rebaseName = GetRebaseName(path, resolvedPath)
+	} else {
+		dirPath, basePath := filepath.Split(path)
+
+		// if not following symlinks, resolve any symlinks in the parent dir
+		var resolvedDirPath string
+		resolvedDirPath, err = filepath.EvalSymlinks(dirPath)
+		if err != nil {
+			return
+		}
+		// resolvedDirPath will have been cleaned (no trailing path separators) so
+		// we can manually join it with the base path element.
+		resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath
+		if hasTrailingPathSeparator(path, os.PathSeparator) &&
+			filepath.Base(path) != filepath.Base(resolvedPath) {
+			rebaseName = filepath.Base(path)
+		}
+	}
+	return resolvedPath, rebaseName, nil
+}
+
+// GetRebaseName normalizes and compares path and resolvedPath,
+// and returns the completed resolved path and the rebased file name
+func GetRebaseName(path, resolvedPath string) (string, string) {
+	// linkTarget will have been cleaned (no trailing path separators and dot) so
+	// we can manually join it with them
+	var rebaseName string
+	if specifiesCurrentDir(path) &&
+		!specifiesCurrentDir(resolvedPath) {
+		resolvedPath += string(filepath.Separator) + "."
+ } + + if hasTrailingPathSeparator(path, os.PathSeparator) && + !hasTrailingPathSeparator(resolvedPath, os.PathSeparator) { + resolvedPath += string(filepath.Separator) + } + + if filepath.Base(path) != filepath.Base(resolvedPath) { + // In the case where the path had a trailing separator and a symlink + // evaluation has changed the last path component, we will need to + // rebase the name in the archive that is being copied to match the + // originally requested name. + rebaseName = filepath.Base(path) + } + return resolvedPath, rebaseName +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/archive/copy_unix.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/archive/copy_unix.go new file mode 100644 index 000000000..3958364f5 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/archive/copy_unix.go @@ -0,0 +1,11 @@ +// +build !windows + +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "path/filepath" +) + +func normalizePath(path string) string { + return filepath.ToSlash(path) +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/archive/copy_windows.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/archive/copy_windows.go new file mode 100644 index 000000000..a878d1bac --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/archive/copy_windows.go @@ -0,0 +1,9 @@ +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "path/filepath" +) + +func normalizePath(path string) string { + return filepath.FromSlash(path) +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/archive/diff.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/archive/diff.go new file mode 100644 index 000000000..146e21fe1 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/archive/diff.go @@ -0,0 +1,260 @@ +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "archive/tar" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "strings" + + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/pools" + "github.com/docker/docker/pkg/system" + "github.com/sirupsen/logrus" +) + +// UnpackLayer unpack `layer` to a `dest`. The stream `layer` can be +// compressed or uncompressed. +// Returns the size in bytes of the contents of the layer. +func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, err error) { + tr := tar.NewReader(layer) + trBuf := pools.BufioReader32KPool.Get(tr) + defer pools.BufioReader32KPool.Put(trBuf) + + var dirs []*tar.Header + unpackedPaths := make(map[string]struct{}) + + if options == nil { + options = &TarOptions{} + } + if options.ExcludePatterns == nil { + options.ExcludePatterns = []string{} + } + idMapping := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) + + aufsTempdir := "" + aufsHardlinks := make(map[string]*tar.Header) + + // Iterate through the files in the archive. + for { + hdr, err := tr.Next() + if err == io.EOF { + // end of tar archive + break + } + if err != nil { + return 0, err + } + + size += hdr.Size + + // Normalize name, for safety and for a simple is-root check + hdr.Name = filepath.Clean(hdr.Name) + + // Windows does not support filenames with colons in them. Ignore + // these files. This is not a problem though (although it might + // appear that it is). Let's suppose a client is running docker pull. + // The daemon it points to is Windows. 
Would it make sense for the
+		// client to be doing a docker pull Ubuntu for example (which has files
+		// with colons in the name under /usr/share/man/man3)? No, absolutely
+		// not as it would really only make sense that they were pulling a
+		// Windows image. However, for development, it is necessary to be able
+		// to pull Linux images which are in the repository.
+		//
+		// TODO Windows. Once the registry is aware of what images are Windows-
+		// specific or Linux-specific, this warning should be changed to an error
+		// to cater for the situation where someone does manage to upload a Linux
+		// image but have it tagged as Windows inadvertently.
+		if runtime.GOOS == "windows" {
+			if strings.Contains(hdr.Name, ":") {
+				logrus.Warnf("Windows: Ignoring %s (is this a Linux image?)", hdr.Name)
+				continue
+			}
+		}
+
+		// Note: as these operations are platform-specific, so must the slash be.
+		if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
+			// Not the root directory, ensure that the parent directory exists.
+			// This happened in some tests where an image had a tarfile without any
+			// parent directories.
+			parent := filepath.Dir(hdr.Name)
+			parentPath := filepath.Join(dest, parent)
+
+			if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
+				err = system.MkdirAll(parentPath, 0600, "")
+				if err != nil {
+					return 0, err
+				}
+			}
+		}
+
+		// Skip AUFS metadata dirs
+		if strings.HasPrefix(hdr.Name, WhiteoutMetaPrefix) {
+			// Regular files inside /.wh..wh.plnk can be used as hardlink targets.
+			// We don't want this directory, but we need the files in it so that
+			// such hardlinks can be resolved.
+			if strings.HasPrefix(hdr.Name, WhiteoutLinkDir) && hdr.Typeflag == tar.TypeReg {
+				basename := filepath.Base(hdr.Name)
+				aufsHardlinks[basename] = hdr
+				if aufsTempdir == "" {
+					if aufsTempdir, err = ioutil.TempDir("", "dockerplnk"); err != nil {
+						return 0, err
+					}
+					defer os.RemoveAll(aufsTempdir)
+				}
+				if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil, options.InUserNS); err != nil {
+					return 0, err
+				}
+			}
+
+			if hdr.Name != WhiteoutOpaqueDir {
+				continue
+			}
+		}
+		path := filepath.Join(dest, hdr.Name)
+		rel, err := filepath.Rel(dest, path)
+		if err != nil {
+			return 0, err
+		}
+
+		// Note: as these operations are platform-specific, so must the slash be.
+		if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
+			return 0, breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
+		}
+		base := filepath.Base(path)
+
+		if strings.HasPrefix(base, WhiteoutPrefix) {
+			dir := filepath.Dir(path)
+			if base == WhiteoutOpaqueDir {
+				_, err := os.Lstat(dir)
+				if err != nil {
+					return 0, err
+				}
+				err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
+					if err != nil {
+						if os.IsNotExist(err) {
+							err = nil // parent was deleted
+						}
+						return err
+					}
+					if path == dir {
+						return nil
+					}
+					if _, exists := unpackedPaths[path]; !exists {
+						err := os.RemoveAll(path)
+						return err
+					}
+					return nil
+				})
+				if err != nil {
+					return 0, err
+				}
+			} else {
+				originalBase := base[len(WhiteoutPrefix):]
+				originalPath := filepath.Join(dir, originalBase)
+				if err := os.RemoveAll(originalPath); err != nil {
+					return 0, err
+				}
+			}
+		} else {
+			// If path exists we almost always just want to remove and replace it.
+			// The only exception is when it is a directory *and* the file from
+			// the layer is also a directory. Then we want to merge them (i.e.
+			// just apply the metadata from the layer).
+ if fi, err := os.Lstat(path); err == nil { + if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { + if err := os.RemoveAll(path); err != nil { + return 0, err + } + } + } + + trBuf.Reset(tr) + srcData := io.Reader(trBuf) + srcHdr := hdr + + // Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so + // we manually retarget these into the temporary files we extracted them into + if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), WhiteoutLinkDir) { + linkBasename := filepath.Base(hdr.Linkname) + srcHdr = aufsHardlinks[linkBasename] + if srcHdr == nil { + return 0, fmt.Errorf("Invalid aufs hardlink") + } + tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename)) + if err != nil { + return 0, err + } + defer tmpFile.Close() + srcData = tmpFile + } + + if err := remapIDs(idMapping, srcHdr); err != nil { + return 0, err + } + + if err := createTarFile(path, dest, srcHdr, srcData, true, nil, options.InUserNS); err != nil { + return 0, err + } + + // Directory mtimes must be handled at the end to avoid further + // file creation in them to modify the directory mtime + if hdr.Typeflag == tar.TypeDir { + dirs = append(dirs, hdr) + } + unpackedPaths[path] = struct{}{} + } + } + + for _, hdr := range dirs { + path := filepath.Join(dest, hdr.Name) + if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { + return 0, err + } + } + + return size, nil +} + +// ApplyLayer parses a diff in the standard layer format from `layer`, +// and applies it to the directory `dest`. The stream `layer` can be +// compressed or uncompressed. +// Returns the size in bytes of the contents of the layer. +func ApplyLayer(dest string, layer io.Reader) (int64, error) { + return applyLayerHandler(dest, layer, &TarOptions{}, true) +} + +// ApplyUncompressedLayer parses a diff in the standard layer format from +// `layer`, and applies it to the directory `dest`. The stream `layer` +// can only be uncompressed. +// Returns the size in bytes of the contents of the layer. 
+func ApplyUncompressedLayer(dest string, layer io.Reader, options *TarOptions) (int64, error) { + return applyLayerHandler(dest, layer, options, false) +} + +// do the bulk load of ApplyLayer, but allow for not calling DecompressStream +func applyLayerHandler(dest string, layer io.Reader, options *TarOptions, decompress bool) (int64, error) { + dest = filepath.Clean(dest) + + // We need to be able to set any perms + if runtime.GOOS != "windows" { + oldmask, err := system.Umask(0) + if err != nil { + return 0, err + } + defer system.Umask(oldmask) + } + + if decompress { + decompLayer, err := DecompressStream(layer) + if err != nil { + return 0, err + } + defer decompLayer.Close() + layer = decompLayer + } + return UnpackLayer(dest, layer, options) +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/archive/time_linux.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/archive/time_linux.go new file mode 100644 index 000000000..797143ee8 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/archive/time_linux.go @@ -0,0 +1,16 @@ +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "syscall" + "time" +) + +func timeToTimespec(time time.Time) (ts syscall.Timespec) { + if time.IsZero() { + // Return UTIME_OMIT special value + ts.Sec = 0 + ts.Nsec = (1 << 30) - 2 + return + } + return syscall.NsecToTimespec(time.UnixNano()) +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go new file mode 100644 index 000000000..f58bf227f --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go @@ -0,0 +1,16 @@ +// +build !linux + +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "syscall" + "time" +) + +func timeToTimespec(time time.Time) (ts syscall.Timespec) { + nsec := int64(0) + if !time.IsZero() { + nsec = time.UnixNano() + } + return syscall.NsecToTimespec(nsec) +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/archive/whiteouts.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/archive/whiteouts.go new file mode 100644 index 000000000..4c072a87e --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/archive/whiteouts.go @@ -0,0 +1,23 @@ +package archive // import "github.com/docker/docker/pkg/archive" + +// Whiteouts are files with a special meaning for the layered filesystem. +// Docker uses AUFS whiteout files inside exported archives. In other +// filesystems these files are generated/handled on tar creation/extraction. + +// WhiteoutPrefix prefix means file is a whiteout. If this is followed by a +// filename this means that file has been removed from the base layer. +const WhiteoutPrefix = ".wh." + +// WhiteoutMetaPrefix prefix means whiteout has a special meaning and is not +// for removing an actual file. Normally these files are excluded from exported +// archives. +const WhiteoutMetaPrefix = WhiteoutPrefix + WhiteoutPrefix + +// WhiteoutLinkDir is a directory AUFS uses for storing hardlink links to other +// layers. Normally these should not go into exported archives and all changed +// hardlinks should be copied to the top layer. +const WhiteoutLinkDir = WhiteoutMetaPrefix + "plnk" + +// WhiteoutOpaqueDir file means directory has been made opaque - meaning +// readdir calls to this directory do not follow to lower layers. 
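+// The resulting file name works out to ".wh..wh..opq".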
+const WhiteoutOpaqueDir = WhiteoutMetaPrefix + ".opq" diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/archive/wrap.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/archive/wrap.go new file mode 100644 index 000000000..85435694c --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/archive/wrap.go @@ -0,0 +1,59 @@ +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "archive/tar" + "bytes" + "io" +) + +// Generate generates a new archive from the content provided +// as input. +// +// `files` is a sequence of path/content pairs. A new file is +// added to the archive for each pair. +// If the last pair is incomplete, the file is created with an +// empty content. For example: +// +// Generate("foo.txt", "hello world", "emptyfile") +// +// The above call will return an archive with 2 files: +// * ./foo.txt with content "hello world" +// * ./empty with empty content +// +// FIXME: stream content instead of buffering +// FIXME: specify permissions and other archive metadata +func Generate(input ...string) (io.Reader, error) { + files := parseStringPairs(input...) + buf := new(bytes.Buffer) + tw := tar.NewWriter(buf) + for _, file := range files { + name, content := file[0], file[1] + hdr := &tar.Header{ + Name: name, + Size: int64(len(content)), + } + if err := tw.WriteHeader(hdr); err != nil { + return nil, err + } + if _, err := tw.Write([]byte(content)); err != nil { + return nil, err + } + } + if err := tw.Close(); err != nil { + return nil, err + } + return buf, nil +} + +func parseStringPairs(input ...string) (output [][2]string) { + output = make([][2]string, 0, len(input)/2+1) + for i := 0; i < len(input); i += 2 { + var pair [2]string + pair[0] = input[i] + if i+1 < len(input) { + pair[1] = input[i+1] + } + output = append(output, pair) + } + return +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/chrootarchive/archive.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/chrootarchive/archive.go new file mode 100644 index 000000000..83ed0c6b2 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/chrootarchive/archive.go @@ -0,0 +1,106 @@ +package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive" + +import ( + "fmt" + "io" + "io/ioutil" + "net" + "os" + "os/user" + "path/filepath" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/idtools" +) + +func init() { + // initialize nss libraries in Glibc so that the dynamic libraries are loaded in the host + // environment not in the chroot from untrusted files. + _, _ = user.Lookup("docker") + _, _ = net.LookupHost("localhost") +} + +// NewArchiver returns a new Archiver which uses chrootarchive.Untar +func NewArchiver(idMapping *idtools.IdentityMapping) *archive.Archiver { + if idMapping == nil { + idMapping = &idtools.IdentityMapping{} + } + return &archive.Archiver{ + Untar: Untar, + IDMapping: idMapping, + } +} + +// Untar reads a stream of bytes from `archive`, parses it as a tar archive, +// and unpacks it into the directory at `dest`. +// The archive may be compressed with one of the following algorithms: +// identity (uncompressed), gzip, bzip2, xz. +func Untar(tarArchive io.Reader, dest string, options *archive.TarOptions) error { + return untarHandler(tarArchive, dest, options, true, dest) +} + +// UntarWithRoot is the same as `Untar`, but allows you to pass in a root directory +// The root directory is the directory that will be chrooted to. 
+// `dest` must be a path within `root`; if it is not, an error will be returned.
+//
+// `root` should be set to a directory which is not controlled by any potentially
+// malicious process.
+//
+// This should be used to prevent a potential attacker from manipulating `dest`
+// such that it would provide access to files outside of `dest` through things
+// like symlinks. Normally `ResolveSymlinksInScope` would handle this, however
+// sanitizing symlinks in this manner is inherently racy:
+// ref: CVE-2018-15664
+func UntarWithRoot(tarArchive io.Reader, dest string, options *archive.TarOptions, root string) error {
+	return untarHandler(tarArchive, dest, options, true, root)
+}
+
+// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive,
+// and unpacks it into the directory at `dest`.
+// The archive must be an uncompressed stream.
+func UntarUncompressed(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
+	return untarHandler(tarArchive, dest, options, false, dest)
+}
+
+// Handler for teasing out the automatic decompression
+func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions, decompress bool, root string) error {
+	if tarArchive == nil {
+		return fmt.Errorf("Empty archive")
+	}
+	if options == nil {
+		options = &archive.TarOptions{}
+	}
+	if options.ExcludePatterns == nil {
+		options.ExcludePatterns = []string{}
+	}
+
+	idMapping := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps)
+	rootIDs := idMapping.RootPair()
+
+	dest = filepath.Clean(dest)
+	if _, err := os.Stat(dest); os.IsNotExist(err) {
+		if err := idtools.MkdirAllAndChownNew(dest, 0755, rootIDs); err != nil {
+			return err
+		}
+	}
+
+	r := ioutil.NopCloser(tarArchive)
+	if decompress {
+		decompressedArchive, err := archive.DecompressStream(tarArchive)
+		if err != nil {
+			return err
+		}
+		defer decompressedArchive.Close()
+		r = decompressedArchive
+	}
+
+	return invokeUnpack(r, dest, options, root)
+}
+
+// Tar tars the requested path while chrooted to the specified root.
+func Tar(srcPath string, options *archive.TarOptions, root string) (io.ReadCloser, error) {
+	if options == nil {
+		options = &archive.TarOptions{}
+	}
+	return invokePack(srcPath, options, root)
+}
diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/chrootarchive/archive_unix.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/chrootarchive/archive_unix.go
new file mode 100644
index 000000000..ea2879dc0
--- /dev/null
+++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/chrootarchive/archive_unix.go
@@ -0,0 +1,208 @@
+// +build !windows
+
+package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive"
+
+import (
+	"bytes"
+	"encoding/json"
+	"flag"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"runtime"
+	"strings"
+
+	"github.com/docker/docker/pkg/archive"
+	"github.com/docker/docker/pkg/reexec"
+	"github.com/pkg/errors"
+)
+
+// untar is the entry-point for docker-untar on re-exec. This is not used on
+// Windows as it does not support chroot, hence no point sandboxing through
+// chroot and rexec.
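+//
+// (Names such as "docker-untar" and "docker-tar", used with reexec.Command
+// below, are registered against these functions via pkg/reexec in this
+// package's init code, so re-execing the current binary under one of those
+// names runs the corresponding entry point.)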
+func untar() { + runtime.LockOSThread() + flag.Parse() + + var options archive.TarOptions + + //read the options from the pipe "ExtraFiles" + if err := json.NewDecoder(os.NewFile(3, "options")).Decode(&options); err != nil { + fatal(err) + } + + dst := flag.Arg(0) + var root string + if len(flag.Args()) > 1 { + root = flag.Arg(1) + } + + if root == "" { + root = dst + } + + if err := chroot(root); err != nil { + fatal(err) + } + + if err := archive.Unpack(os.Stdin, dst, &options); err != nil { + fatal(err) + } + // fully consume stdin in case it is zero padded + if _, err := flush(os.Stdin); err != nil { + fatal(err) + } + + os.Exit(0) +} + +func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.TarOptions, root string) error { + if root == "" { + return errors.New("must specify a root to chroot to") + } + + // We can't pass a potentially large exclude list directly via cmd line + // because we easily overrun the kernel's max argument/environment size + // when the full image list is passed (e.g. when this is used by + // `docker load`). We will marshall the options via a pipe to the + // child + r, w, err := os.Pipe() + if err != nil { + return fmt.Errorf("Untar pipe failure: %v", err) + } + + if root != "" { + relDest, err := filepath.Rel(root, dest) + if err != nil { + return err + } + if relDest == "." { + relDest = "/" + } + if relDest[0] != '/' { + relDest = "/" + relDest + } + dest = relDest + } + + cmd := reexec.Command("docker-untar", dest, root) + cmd.Stdin = decompressedArchive + + cmd.ExtraFiles = append(cmd.ExtraFiles, r) + output := bytes.NewBuffer(nil) + cmd.Stdout = output + cmd.Stderr = output + + if err := cmd.Start(); err != nil { + w.Close() + return fmt.Errorf("Untar error on re-exec cmd: %v", err) + } + + //write the options to the pipe for the untar exec to read + if err := json.NewEncoder(w).Encode(options); err != nil { + w.Close() + return fmt.Errorf("Untar json encode to pipe failed: %v", err) + } + w.Close() + + if err := cmd.Wait(); err != nil { + // when `xz -d -c -q | docker-untar ...` failed on docker-untar side, + // we need to exhaust `xz`'s output, otherwise the `xz` side will be + // pending on write pipe forever + io.Copy(ioutil.Discard, decompressedArchive) + + return fmt.Errorf("Error processing tar file(%v): %s", err, output) + } + return nil +} + +func tar() { + runtime.LockOSThread() + flag.Parse() + + src := flag.Arg(0) + var root string + if len(flag.Args()) > 1 { + root = flag.Arg(1) + } + + if root == "" { + root = src + } + + if err := realChroot(root); err != nil { + fatal(err) + } + + var options archive.TarOptions + if err := json.NewDecoder(os.Stdin).Decode(&options); err != nil { + fatal(err) + } + + rdr, err := archive.TarWithOptions(src, &options) + if err != nil { + fatal(err) + } + defer rdr.Close() + + if _, err := io.Copy(os.Stdout, rdr); err != nil { + fatal(err) + } + + os.Exit(0) +} + +func invokePack(srcPath string, options *archive.TarOptions, root string) (io.ReadCloser, error) { + if root == "" { + return nil, errors.New("root path must not be empty") + } + + relSrc, err := filepath.Rel(root, srcPath) + if err != nil { + return nil, err + } + if relSrc == "." 
{ + relSrc = "/" + } + if relSrc[0] != '/' { + relSrc = "/" + relSrc + } + + // make sure we didn't trim a trailing slash with the call to `Rel` + if strings.HasSuffix(srcPath, "/") && !strings.HasSuffix(relSrc, "/") { + relSrc += "/" + } + + cmd := reexec.Command("docker-tar", relSrc, root) + + errBuff := bytes.NewBuffer(nil) + cmd.Stderr = errBuff + + tarR, tarW := io.Pipe() + cmd.Stdout = tarW + + stdin, err := cmd.StdinPipe() + if err != nil { + return nil, errors.Wrap(err, "error getting options pipe for tar process") + } + + if err := cmd.Start(); err != nil { + return nil, errors.Wrap(err, "tar error on re-exec cmd") + } + + go func() { + err := cmd.Wait() + err = errors.Wrapf(err, "error processing tar file: %s", errBuff) + tarW.CloseWithError(err) + }() + + if err := json.NewEncoder(stdin).Encode(options); err != nil { + stdin.Close() + return nil, errors.Wrap(err, "tar json encode to pipe failed") + } + stdin.Close() + + return tarR, nil +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/chrootarchive/archive_windows.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/chrootarchive/archive_windows.go new file mode 100644 index 000000000..de87113e9 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/chrootarchive/archive_windows.go @@ -0,0 +1,29 @@ +package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive" + +import ( + "io" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/longpath" +) + +// chroot is not supported by Windows +func chroot(path string) error { + return nil +} + +func invokeUnpack(decompressedArchive io.ReadCloser, + dest string, + options *archive.TarOptions, root string) error { + // Windows is different to Linux here because Windows does not support + // chroot. Hence there is no point sandboxing a chrooted process to + // do the unpack. We call inline instead within the daemon process. + return archive.Unpack(decompressedArchive, longpath.AddPrefix(dest), options) +} + +func invokePack(srcPath string, options *archive.TarOptions, root string) (io.ReadCloser, error) { + // Windows is different to Linux here because Windows does not support + // chroot. Hence there is no point sandboxing a chrooted process to + // do the pack. We call inline instead within the daemon process. + return archive.TarWithOptions(srcPath, options) +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/chrootarchive/chroot_linux.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/chrootarchive/chroot_linux.go new file mode 100644 index 000000000..9802fad51 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/chrootarchive/chroot_linux.go @@ -0,0 +1,113 @@ +package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive" + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + + "github.com/docker/docker/pkg/mount" + rsystem "github.com/opencontainers/runc/libcontainer/system" + "golang.org/x/sys/unix" +) + +// chroot on linux uses pivot_root instead of chroot +// pivot_root takes a new root and an old root. +// Old root must be a sub-dir of new root, it is where the current rootfs will reside after the call to pivot_root. +// New root is where the new rootfs is set to. +// Old root is removed after the call to pivot_root so it is no longer available under the new root. 
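+// pivot_root(2) requires the new root to be a mount point, which is why the
+// implementation below bind-mounts path onto itself when it is not already
+// mounted.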
+// This is similar to how libcontainer sets up a container's rootfs +func chroot(path string) (err error) { + // if the engine is running in a user namespace we need to use actual chroot + if rsystem.RunningInUserNS() { + return realChroot(path) + } + if err := unix.Unshare(unix.CLONE_NEWNS); err != nil { + return fmt.Errorf("Error creating mount namespace before pivot: %v", err) + } + + // Make everything in new ns slave. + // Don't use `private` here as this could race where the mountns gets a + // reference to a mount and an unmount from the host does not propagate, + // which could potentially cause transient errors for other operations, + // even though this should be relatively small window here `slave` should + // not cause any problems. + if err := mount.MakeRSlave("/"); err != nil { + return err + } + + if mounted, _ := mount.Mounted(path); !mounted { + if err := mount.Mount(path, path, "bind", "rbind,rw"); err != nil { + return realChroot(path) + } + } + + // setup oldRoot for pivot_root + pivotDir, err := ioutil.TempDir(path, ".pivot_root") + if err != nil { + return fmt.Errorf("Error setting up pivot dir: %v", err) + } + + var mounted bool + defer func() { + if mounted { + // make sure pivotDir is not mounted before we try to remove it + if errCleanup := unix.Unmount(pivotDir, unix.MNT_DETACH); errCleanup != nil { + if err == nil { + err = errCleanup + } + return + } + } + + errCleanup := os.Remove(pivotDir) + // pivotDir doesn't exist if pivot_root failed and chroot+chdir was successful + // because we already cleaned it up on failed pivot_root + if errCleanup != nil && !os.IsNotExist(errCleanup) { + errCleanup = fmt.Errorf("Error cleaning up after pivot: %v", errCleanup) + if err == nil { + err = errCleanup + } + } + }() + + if err := unix.PivotRoot(path, pivotDir); err != nil { + // If pivot fails, fall back to the normal chroot after cleaning up temp dir + if err := os.Remove(pivotDir); err != nil { + return fmt.Errorf("Error cleaning up after failed pivot: %v", err) + } + return realChroot(path) + } + mounted = true + + // This is the new path for where the old root (prior to the pivot) has been moved to + // This dir contains the rootfs of the caller, which we need to remove so it is not visible during extraction + pivotDir = filepath.Join("/", filepath.Base(pivotDir)) + + if err := unix.Chdir("/"); err != nil { + return fmt.Errorf("Error changing to new root: %v", err) + } + + // Make the pivotDir (where the old root lives) private so it can be unmounted without propagating to the host + if err := unix.Mount("", pivotDir, "", unix.MS_PRIVATE|unix.MS_REC, ""); err != nil { + return fmt.Errorf("Error making old root private after pivot: %v", err) + } + + // Now unmount the old root so it's no longer visible from the new root + if err := unix.Unmount(pivotDir, unix.MNT_DETACH); err != nil { + return fmt.Errorf("Error while unmounting old root after pivot: %v", err) + } + mounted = false + + return nil +} + +func realChroot(path string) error { + if err := unix.Chroot(path); err != nil { + return fmt.Errorf("Error after fallback to chroot: %v", err) + } + if err := unix.Chdir("/"); err != nil { + return fmt.Errorf("Error changing to new root after chroot: %v", err) + } + return nil +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/chrootarchive/chroot_unix.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/chrootarchive/chroot_unix.go new file mode 100644 index 000000000..8003136f5 --- /dev/null +++ 
b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/chrootarchive/chroot_unix.go @@ -0,0 +1,16 @@ +// +build !windows,!linux + +package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive" + +import "golang.org/x/sys/unix" + +func chroot(path string) error { + if err := unix.Chroot(path); err != nil { + return err + } + return unix.Chdir("/") +} + +func realChroot(path string) error { + return chroot(path) +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/chrootarchive/diff.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/chrootarchive/diff.go new file mode 100644 index 000000000..7712cc17c --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/chrootarchive/diff.go @@ -0,0 +1,23 @@ +package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive" + +import ( + "io" + + "github.com/docker/docker/pkg/archive" +) + +// ApplyLayer parses a diff in the standard layer format from `layer`, +// and applies it to the directory `dest`. The stream `layer` can only be +// uncompressed. +// Returns the size in bytes of the contents of the layer. +func ApplyLayer(dest string, layer io.Reader) (size int64, err error) { + return applyLayerHandler(dest, layer, &archive.TarOptions{}, true) +} + +// ApplyUncompressedLayer parses a diff in the standard layer format from +// `layer`, and applies it to the directory `dest`. The stream `layer` +// can only be uncompressed. +// Returns the size in bytes of the contents of the layer. +func ApplyUncompressedLayer(dest string, layer io.Reader, options *archive.TarOptions) (int64, error) { + return applyLayerHandler(dest, layer, options, false) +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/chrootarchive/diff_unix.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/chrootarchive/diff_unix.go new file mode 100644 index 000000000..d96a09f8f --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/chrootarchive/diff_unix.go @@ -0,0 +1,130 @@ +//+build !windows + +package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive" + +import ( + "bytes" + "encoding/json" + "flag" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "runtime" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/reexec" + "github.com/docker/docker/pkg/system" + rsystem "github.com/opencontainers/runc/libcontainer/system" +) + +type applyLayerResponse struct { + LayerSize int64 `json:"layerSize"` +} + +// applyLayer is the entry-point for docker-applylayer on re-exec. This is not +// used on Windows as it does not support chroot, hence no point sandboxing +// through chroot and rexec. 
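Editor's note: the re-exec mechanism referred to in the comment above is worth seeing in isolation. A minimal sketch of the pattern, using the real github.com/docker/docker/pkg/reexec package; "my-child" is an illustrative entrypoint name:

```go
// Sketch of the docker re-exec pattern: register a named entrypoint at
// init time, then re-invoke this same binary under that name so the
// child can chroot without affecting the parent process.
package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/pkg/reexec"
)

func init() {
	// Runs in the child when the binary is re-invoked as "my-child".
	reexec.Register("my-child", func() {
		fmt.Println("child: would chroot and do the work here")
	})
}

func main() {
	if reexec.Init() {
		// We were invoked under a registered name; the handler already ran.
		return
	}
	cmd := reexec.Command("my-child") // argv[0] selects the entrypoint
	out, err := cmd.CombinedOutput()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Print(string(out))
}
```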
+func applyLayer() { + + var ( + tmpDir string + err error + options *archive.TarOptions + ) + runtime.LockOSThread() + flag.Parse() + + inUserns := rsystem.RunningInUserNS() + if err := chroot(flag.Arg(0)); err != nil { + fatal(err) + } + + // We need to be able to set any perms + oldmask, err := system.Umask(0) + defer system.Umask(oldmask) + if err != nil { + fatal(err) + } + + if err := json.Unmarshal([]byte(os.Getenv("OPT")), &options); err != nil { + fatal(err) + } + + if inUserns { + options.InUserNS = true + } + + if tmpDir, err = ioutil.TempDir("/", "temp-docker-extract"); err != nil { + fatal(err) + } + + os.Setenv("TMPDIR", tmpDir) + size, err := archive.UnpackLayer("/", os.Stdin, options) + os.RemoveAll(tmpDir) + if err != nil { + fatal(err) + } + + encoder := json.NewEncoder(os.Stdout) + if err := encoder.Encode(applyLayerResponse{size}); err != nil { + fatal(fmt.Errorf("unable to encode layerSize JSON: %s", err)) + } + + if _, err := flush(os.Stdin); err != nil { + fatal(err) + } + + os.Exit(0) +} + +// applyLayerHandler parses a diff in the standard layer format from `layer`, and +// applies it to the directory `dest`. Returns the size in bytes of the +// contents of the layer. +func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) { + dest = filepath.Clean(dest) + if decompress { + decompressed, err := archive.DecompressStream(layer) + if err != nil { + return 0, err + } + defer decompressed.Close() + + layer = decompressed + } + if options == nil { + options = &archive.TarOptions{} + if rsystem.RunningInUserNS() { + options.InUserNS = true + } + } + if options.ExcludePatterns == nil { + options.ExcludePatterns = []string{} + } + + data, err := json.Marshal(options) + if err != nil { + return 0, fmt.Errorf("ApplyLayer json encode: %v", err) + } + + cmd := reexec.Command("docker-applyLayer", dest) + cmd.Stdin = layer + cmd.Env = append(cmd.Env, fmt.Sprintf("OPT=%s", data)) + + outBuf, errBuf := new(bytes.Buffer), new(bytes.Buffer) + cmd.Stdout, cmd.Stderr = outBuf, errBuf + + if err = cmd.Run(); err != nil { + return 0, fmt.Errorf("ApplyLayer %s stdout: %s stderr: %s", err, outBuf, errBuf) + } + + // Stdout should be a valid JSON struct representing an applyLayerResponse. + response := applyLayerResponse{} + decoder := json.NewDecoder(outBuf) + if err = decoder.Decode(&response); err != nil { + return 0, fmt.Errorf("unable to decode ApplyLayer JSON response: %s", err) + } + + return response.LayerSize, nil +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/chrootarchive/diff_windows.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/chrootarchive/diff_windows.go new file mode 100644 index 000000000..8f3f3a4a8 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/chrootarchive/diff_windows.go @@ -0,0 +1,45 @@ +package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive" + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/longpath" +) + +// applyLayerHandler parses a diff in the standard layer format from `layer`, and +// applies it to the directory `dest`. Returns the size in bytes of the +// contents of the layer. 
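Editor's note: the unix applyLayer/applyLayerHandler pair above talks over a small ad-hoc protocol: tar options travel to the re-exec'd child in the OPT environment variable, the layer streams over stdin, and the child answers with a single JSON object on stdout. A stripped-down sketch of that handshake (illustrative names and values, not the vendored code; plain os/exec stands in for reexec):

```go
// Sketch of the OPT-env / JSON-stdout handshake used by applyLayerHandler.
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"os"
	"os/exec"
)

type sizeResponse struct {
	LayerSize int64 `json:"layerSize"`
}

// child reads its options from $OPT and reports a result on stdout.
func child() {
	var opts map[string]interface{}
	if err := json.Unmarshal([]byte(os.Getenv("OPT")), &opts); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	_ = opts // ... chroot and unpack the layer here ...
	json.NewEncoder(os.Stdout).Encode(sizeResponse{LayerSize: 42})
}

// parent serializes options into the environment and decodes the reply.
func parent() (int64, error) {
	data, _ := json.Marshal(map[string]interface{}{"NoLchown": true})
	cmd := exec.Command(os.Args[0], "child")
	cmd.Env = append(os.Environ(), "OPT="+string(data))
	out := new(bytes.Buffer)
	cmd.Stdout = out
	if err := cmd.Run(); err != nil {
		return 0, err
	}
	var resp sizeResponse
	if err := json.NewDecoder(out).Decode(&resp); err != nil {
		return 0, err
	}
	return resp.LayerSize, nil
}

func main() {
	if len(os.Args) > 1 && os.Args[1] == "child" {
		child()
		return
	}
	if n, err := parent(); err == nil {
		fmt.Println("layer size:", n)
	}
}
```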
+func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) { + dest = filepath.Clean(dest) + + // Ensure it is a Windows-style volume path + dest = longpath.AddPrefix(dest) + + if decompress { + decompressed, err := archive.DecompressStream(layer) + if err != nil { + return 0, err + } + defer decompressed.Close() + + layer = decompressed + } + + tmpDir, err := ioutil.TempDir(os.Getenv("temp"), "temp-docker-extract") + if err != nil { + return 0, fmt.Errorf("ApplyLayer failed to create temp-docker-extract under %s. %s", dest, err) + } + + s, err := archive.UnpackLayer(dest, layer, nil) + os.RemoveAll(tmpDir) + if err != nil { + return 0, fmt.Errorf("ApplyLayer %s failed UnpackLayer to %s: %s", layer, dest, err) + } + + return s, nil +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/chrootarchive/init_unix.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/chrootarchive/init_unix.go new file mode 100644 index 000000000..c24fea7d9 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/chrootarchive/init_unix.go @@ -0,0 +1,29 @@ +// +build !windows + +package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive" + +import ( + "fmt" + "io" + "io/ioutil" + "os" + + "github.com/docker/docker/pkg/reexec" +) + +func init() { + reexec.Register("docker-applyLayer", applyLayer) + reexec.Register("docker-untar", untar) + reexec.Register("docker-tar", tar) +} + +func fatal(err error) { + fmt.Fprint(os.Stderr, err) + os.Exit(1) +} + +// flush consumes all the bytes from the reader discarding +// any errors +func flush(r io.Reader) (bytes int64, err error) { + return io.Copy(ioutil.Discard, r) +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/chrootarchive/init_windows.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/chrootarchive/init_windows.go new file mode 100644 index 000000000..15ed874e7 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/chrootarchive/init_windows.go @@ -0,0 +1,4 @@ +package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive" + +func init() { +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/containerfs/archiver.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/containerfs/archiver.go new file mode 100644 index 000000000..fed0a07d7 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/containerfs/archiver.go @@ -0,0 +1,205 @@ +package containerfs // import "github.com/docker/docker/pkg/containerfs" + +import ( + "archive/tar" + "fmt" + "io" + "os" + "path/filepath" + "time" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/system" + "github.com/sirupsen/logrus" +) + +// TarFunc provides a function definition for a custom Tar function +type TarFunc func(string, *archive.TarOptions) (io.ReadCloser, error) + +// UntarFunc provides a function definition for a custom Untar function +type UntarFunc func(io.Reader, string, *archive.TarOptions) error + +// Archiver provides a similar implementation of the archive.Archiver package with the rootfs abstraction +type Archiver struct { + SrcDriver Driver + DstDriver Driver + Tar TarFunc + Untar UntarFunc + IDMapping *idtools.IdentityMapping +} + +// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. +// If either Tar or Untar fails, TarUntar aborts and returns the error. 
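Editor's note: with the default Archiver the Tar and Untar halves are the docker archive package functions, so the round-trip described above reduces to roughly this (editorial sketch; paths are illustrative):

```go
// Sketch of the Tar -> Untar round-trip that Archiver.TarUntar performs,
// streaming the tar directly with no intermediate file.
package main

import (
	"log"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	rc, err := archive.TarWithOptions("/tmp/src", &archive.TarOptions{
		Compression: archive.Uncompressed,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer rc.Close()

	// Unpack the stream straight into the destination directory.
	if err := archive.Untar(rc, "/tmp/dst", &archive.TarOptions{}); err != nil {
		log.Fatal(err)
	}
}
```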
+func (archiver *Archiver) TarUntar(src, dst string) error { + logrus.Debugf("TarUntar(%s %s)", src, dst) + tarArchive, err := archiver.Tar(src, &archive.TarOptions{Compression: archive.Uncompressed}) + if err != nil { + return err + } + defer tarArchive.Close() + options := &archive.TarOptions{ + UIDMaps: archiver.IDMapping.UIDs(), + GIDMaps: archiver.IDMapping.GIDs(), + } + return archiver.Untar(tarArchive, dst, options) +} + +// UntarPath untar a file from path to a destination, src is the source tar file path. +func (archiver *Archiver) UntarPath(src, dst string) error { + tarArchive, err := archiver.SrcDriver.Open(src) + if err != nil { + return err + } + defer tarArchive.Close() + options := &archive.TarOptions{ + UIDMaps: archiver.IDMapping.UIDs(), + GIDMaps: archiver.IDMapping.GIDs(), + } + return archiver.Untar(tarArchive, dst, options) +} + +// CopyWithTar creates a tar archive of filesystem path `src`, and +// unpacks it at filesystem path `dst`. +// The archive is streamed directly with fixed buffering and no +// intermediary disk IO. +func (archiver *Archiver) CopyWithTar(src, dst string) error { + srcSt, err := archiver.SrcDriver.Stat(src) + if err != nil { + return err + } + if !srcSt.IsDir() { + return archiver.CopyFileWithTar(src, dst) + } + + // if this archiver is set up with ID mapping we need to create + // the new destination directory with the remapped root UID/GID pair + // as owner + + identity := idtools.Identity{UID: archiver.IDMapping.RootPair().UID, GID: archiver.IDMapping.RootPair().GID} + + // Create dst, copy src's content into it + if err := idtools.MkdirAllAndChownNew(dst, 0755, identity); err != nil { + return err + } + logrus.Debugf("Calling TarUntar(%s, %s)", src, dst) + return archiver.TarUntar(src, dst) +} + +// CopyFileWithTar emulates the behavior of the 'cp' command-line +// for a single file. It copies a regular file from path `src` to +// path `dst`, and preserves all its metadata. +func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { + logrus.Debugf("CopyFileWithTar(%s, %s)", src, dst) + srcDriver := archiver.SrcDriver + dstDriver := archiver.DstDriver + + srcSt, err := srcDriver.Stat(src) + if err != nil { + return err + } + + if srcSt.IsDir() { + return fmt.Errorf("Can't copy a directory") + } + + // Clean up the trailing slash. This must be done in an operating + // system specific manner. + if dst[len(dst)-1] == dstDriver.Separator() { + dst = dstDriver.Join(dst, srcDriver.Base(src)) + } + + // The original call was system.MkdirAll, which is just + // os.MkdirAll on not-Windows and changed for Windows. 
+ if dstDriver.OS() == "windows" { + // Now we are WCOW + if err := system.MkdirAll(filepath.Dir(dst), 0700, ""); err != nil { + return err + } + } else { + // We can just use the driver.MkdirAll function + if err := dstDriver.MkdirAll(dstDriver.Dir(dst), 0700); err != nil { + return err + } + } + + r, w := io.Pipe() + errC := make(chan error, 1) + + go func() { + defer close(errC) + errC <- func() error { + defer w.Close() + + srcF, err := srcDriver.Open(src) + if err != nil { + return err + } + defer srcF.Close() + + hdr, err := tar.FileInfoHeader(srcSt, "") + if err != nil { + return err + } + hdr.Format = tar.FormatPAX + hdr.ModTime = hdr.ModTime.Truncate(time.Second) + hdr.AccessTime = time.Time{} + hdr.ChangeTime = time.Time{} + hdr.Name = dstDriver.Base(dst) + if dstDriver.OS() == "windows" { + hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) + } else { + hdr.Mode = int64(os.FileMode(hdr.Mode)) + } + + if err := remapIDs(archiver.IDMapping, hdr); err != nil { + return err + } + + tw := tar.NewWriter(w) + defer tw.Close() + if err := tw.WriteHeader(hdr); err != nil { + return err + } + if _, err := io.Copy(tw, srcF); err != nil { + return err + } + return nil + }() + }() + defer func() { + if er := <-errC; err == nil && er != nil { + err = er + } + }() + + err = archiver.Untar(r, dstDriver.Dir(dst), nil) + if err != nil { + r.CloseWithError(err) + } + return err +} + +// IdentityMapping returns the IdentityMapping of the archiver. +func (archiver *Archiver) IdentityMapping() *idtools.IdentityMapping { + return archiver.IDMapping +} + +func remapIDs(idMapping *idtools.IdentityMapping, hdr *tar.Header) error { + ids, err := idMapping.ToHost(idtools.Identity{UID: hdr.Uid, GID: hdr.Gid}) + hdr.Uid, hdr.Gid = ids.UID, ids.GID + return err +} + +// chmodTarEntry is used to adjust the file permissions used in tar header based +// on the platform the archival is done. +func chmodTarEntry(perm os.FileMode) os.FileMode { + //perm &= 0755 // this 0-ed out tar flags (like link, regular file, directory marker etc.) + permPart := perm & os.ModePerm + noPermPart := perm &^ os.ModePerm + // Add the x bit: make everything +x from windows + permPart |= 0111 + permPart &= 0755 + + return noPermPart | permPart +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/containerfs/containerfs.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/containerfs/containerfs.go new file mode 100644 index 000000000..7bb1d8c36 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/containerfs/containerfs.go @@ -0,0 +1,87 @@ +package containerfs // import "github.com/docker/docker/pkg/containerfs" + +import ( + "path/filepath" + "runtime" + + "github.com/containerd/continuity/driver" + "github.com/containerd/continuity/pathdriver" + "github.com/docker/docker/pkg/symlink" +) + +// ContainerFS is an interface that represents a root file system +type ContainerFS interface { + // Path returns the path to the root. Note that this may not exist + // on the local system, so the continuity operations must be used + Path() string + + // ResolveScopedPath evaluates the given path scoped to the root. + // For example, if root=/a, and path=/b/c, then this function would return /a/b/c. + // If rawPath is true, then the function will not perform any modifications + before path resolution. Otherwise, the function will clean the given path + by making it an absolute path.
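Editor's note: the root=/a, path=/b/c example in the comment above translates directly into a call on the local implementation defined further down in this file (editorial sketch; paths are illustrative):

```go
// Sketch: resolving a container-scoped path against a local rootfs.
package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/pkg/containerfs"
)

func main() {
	root := containerfs.NewLocalContainerFS("/a")
	// rawPath=false cleans "/b/c"; symlink resolution stays inside "/a".
	p, err := root.ResolveScopedPath("/b/c", false)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(p) // "/a/b/c"
}
```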
ResolveScopedPath(path string, rawPath bool) (string, error) + + Driver +} + +// Driver combines both continuity's Driver and PathDriver interfaces with a Platform +// field to determine the OS. +type Driver interface { + // OS returns the OS where the rootfs is located. Essentially, + // runtime.GOOS for everything aside from LCOW, which is "linux" + OS() string + + // Architecture returns the hardware architecture where the + // container is located. + Architecture() string + + // Driver & PathDriver provide methods to manipulate files & paths + driver.Driver + pathdriver.PathDriver +} + +// NewLocalContainerFS is a helper function to implement daemon's Mount interface +// when the graphdriver mount point is a local path on the machine. +func NewLocalContainerFS(path string) ContainerFS { + return &local{ + path: path, + Driver: driver.LocalDriver, + PathDriver: pathdriver.LocalPathDriver, + } +} + +// NewLocalDriver provides file and path drivers for a local file system. They are +// essentially a wrapper around the `os` and `filepath` functions. +func NewLocalDriver() Driver { + return &local{ + Driver: driver.LocalDriver, + PathDriver: pathdriver.LocalPathDriver, + } +} + +type local struct { + path string + driver.Driver + pathdriver.PathDriver +} + +func (l *local) Path() string { + return l.path +} + +func (l *local) ResolveScopedPath(path string, rawPath bool) (string, error) { + cleanedPath := path + if !rawPath { + cleanedPath = cleanScopedPath(path) + } + return symlink.FollowSymlinkInScope(filepath.Join(l.path, cleanedPath), l.path) +} + +func (l *local) OS() string { + return runtime.GOOS +} + +func (l *local) Architecture() string { + return runtime.GOARCH +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/containerfs/containerfs_unix.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/containerfs/containerfs_unix.go new file mode 100644 index 000000000..6a9945951 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/containerfs/containerfs_unix.go @@ -0,0 +1,10 @@ +// +build !windows + +package containerfs // import "github.com/docker/docker/pkg/containerfs" + +import "path/filepath" + +// cleanScopedPath prepends a separator to the path so it can be combined with a mount path.
+func cleanScopedPath(path string) string { + return filepath.Join(string(filepath.Separator), path) +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/containerfs/containerfs_windows.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/containerfs/containerfs_windows.go new file mode 100644 index 000000000..9fb708462 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/containerfs/containerfs_windows.go @@ -0,0 +1,15 @@ +package containerfs // import "github.com/docker/docker/pkg/containerfs" + +import "path/filepath" + +// cleanScopedPath removes the C:\ syntax, and prepares to combine +// with a volume path +func cleanScopedPath(path string) string { + if len(path) >= 2 { + c := path[0] + if path[1] == ':' && ('a' <= c && c <= 'z' || 'A' <= c && c <= 'Z') { + path = path[2:] + } + } + return filepath.Join(string(filepath.Separator), path) +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/fileutils/fileutils.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/fileutils/fileutils.go new file mode 100644 index 000000000..34f1c726f --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/fileutils/fileutils.go @@ -0,0 +1,298 @@ +package fileutils // import "github.com/docker/docker/pkg/fileutils" + +import ( + "errors" + "fmt" + "io" + "os" + "path/filepath" + "regexp" + "strings" + "text/scanner" + + "github.com/sirupsen/logrus" +) + +// PatternMatcher allows checking paths against a list of patterns +type PatternMatcher struct { + patterns []*Pattern + exclusions bool +} + +// NewPatternMatcher creates a new matcher object for specific patterns that can +// be used later to match paths against the patterns +func NewPatternMatcher(patterns []string) (*PatternMatcher, error) { + pm := &PatternMatcher{ + patterns: make([]*Pattern, 0, len(patterns)), + } + for _, p := range patterns { + // Eliminate leading and trailing whitespace. + p = strings.TrimSpace(p) + if p == "" { + continue + } + p = filepath.Clean(p) + newp := &Pattern{} + if p[0] == '!' { + if len(p) == 1 { + return nil, errors.New("illegal exclusion pattern: \"!\"") + } + newp.exclusion = true + p = p[1:] + pm.exclusions = true + } + // Do some syntax checking on the pattern. + // filepath's Match() has some really weird rules that are inconsistent + // so instead of trying to dup their logic, just call Match() for its + // error state and if there is an error in the pattern return it. + // If this becomes an issue we can remove this since its really only + // needed in the error (syntax) case - which isn't really critical. + if _, err := filepath.Match(p, "."); err != nil { + return nil, err + } + newp.cleanedPattern = p + newp.dirs = strings.Split(p, string(os.PathSeparator)) + pm.patterns = append(pm.patterns, newp) + } + return pm, nil +} + +// Matches matches path against all the patterns. Matches is not safe to be +// called concurrently +func (pm *PatternMatcher) Matches(file string) (bool, error) { + matched := false + file = filepath.FromSlash(file) + parentPath := filepath.Dir(file) + parentPathDirs := strings.Split(parentPath, string(os.PathSeparator)) + + for _, pattern := range pm.patterns { + negative := false + + if pattern.exclusion { + negative = true + } + + match, err := pattern.match(file) + if err != nil { + return false, err + } + + if !match && parentPath != "." { + // Check to see if the pattern matches one of our parent dirs.
+ if len(pattern.dirs) <= len(parentPathDirs) { + match, _ = pattern.match(strings.Join(parentPathDirs[:len(pattern.dirs)], string(os.PathSeparator))) + } + } + + if match { + matched = !negative + } + } + + if matched { + logrus.Debugf("Skipping excluded path: %s", file) + } + + return matched, nil +} + +// Exclusions returns true if any of the patterns define exclusions +func (pm *PatternMatcher) Exclusions() bool { + return pm.exclusions +} + +// Patterns returns array of active patterns +func (pm *PatternMatcher) Patterns() []*Pattern { + return pm.patterns +} + +// Pattern defines a single regexp used to filter file paths. +type Pattern struct { + cleanedPattern string + dirs []string + regexp *regexp.Regexp + exclusion bool +} + +func (p *Pattern) String() string { + return p.cleanedPattern +} + +// Exclusion returns true if this pattern defines exclusion +func (p *Pattern) Exclusion() bool { + return p.exclusion +} + +func (p *Pattern) match(path string) (bool, error) { + + if p.regexp == nil { + if err := p.compile(); err != nil { + return false, filepath.ErrBadPattern + } + } + + b := p.regexp.MatchString(path) + + return b, nil +} + +func (p *Pattern) compile() error { + regStr := "^" + pattern := p.cleanedPattern + // Go through the pattern and convert it to a regexp. + // We use a scanner so we can support utf-8 chars. + var scan scanner.Scanner + scan.Init(strings.NewReader(pattern)) + + sl := string(os.PathSeparator) + escSL := sl + if sl == `\` { + escSL += `\` + } + + for scan.Peek() != scanner.EOF { + ch := scan.Next() + + if ch == '*' { + if scan.Peek() == '*' { + // is some flavor of "**" + scan.Next() + + // Treat **/ as ** so eat the "/" + if string(scan.Peek()) == sl { + scan.Next() + } + + if scan.Peek() == scanner.EOF { + // is "**EOF" - to align with .gitignore just accept all + regStr += ".*" + } else { + // is "**" + // Note that this allows for any # of /'s (even 0) because + // the .* will eat everything, even /'s + regStr += "(.*" + escSL + ")?" + } + } else { + // is "*" so map it to anything but "/" + regStr += "[^" + escSL + "]*" + } + } else if ch == '?' { + // "?" is any char except "/" + regStr += "[^" + escSL + "]" + } else if ch == '.' || ch == '$' { + // Escape some regexp special chars that have no meaning + // in golang's filepath.Match + regStr += `\` + string(ch) + } else if ch == '\\' { + // escape next char. Note that a trailing \ in the pattern + // will be left alone (but need to escape it) + if sl == `\` { + // On windows map "\" to "\\", meaning an escaped backslash, + // and then just continue because filepath.Match on + // Windows doesn't allow escaping at all + regStr += escSL + continue + } + if scan.Peek() != scanner.EOF { + regStr += `\` + string(scan.Next()) + } else { + regStr += `\` + } + } else { + regStr += string(ch) + } + } + + regStr += "$" + + re, err := regexp.Compile(regStr) + if err != nil { + return err + } + + p.regexp = re + return nil +} + +// Matches returns true if file matches any of the patterns +// and isn't excluded by any of the subsequent patterns. +func Matches(file string, patterns []string) (bool, error) { + pm, err := NewPatternMatcher(patterns) + if err != nil { + return false, err + } + file = filepath.Clean(file) + + if file == "." { + // Don't let them exclude everything, kind of silly. + return false, nil + } + + return pm.Matches(file) +} + +// CopyFile copies from src to dst until either EOF is reached +// on src or an error occurs. It verifies src exists and removes +// the dst if it exists. 
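Editor's note: the pattern matcher above is easiest to understand with concrete .dockerignore-style patterns; a small usage sketch (patterns and paths are illustrative):

```go
// Sketch: .dockerignore-style matching with fileutils.Matches.
package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/pkg/fileutils"
)

func main() {
	// "**" crosses directory levels; a leading "!" re-includes a path.
	patterns := []string{"vendor/**", "!vendor/keep/README.md"}

	for _, f := range []string{"vendor/a/b.go", "vendor/keep/README.md", "main.go"} {
		excluded, err := fileutils.Matches(f, patterns)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%-24s excluded=%v\n", f, excluded)
	}
	// vendor/a/b.go            excluded=true
	// vendor/keep/README.md    excluded=false (re-included by the ! pattern)
	// main.go                  excluded=false
}
```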
+func CopyFile(src, dst string) (int64, error) { + cleanSrc := filepath.Clean(src) + cleanDst := filepath.Clean(dst) + if cleanSrc == cleanDst { + return 0, nil + } + sf, err := os.Open(cleanSrc) + if err != nil { + return 0, err + } + defer sf.Close() + if err := os.Remove(cleanDst); err != nil && !os.IsNotExist(err) { + return 0, err + } + df, err := os.Create(cleanDst) + if err != nil { + return 0, err + } + defer df.Close() + return io.Copy(df, sf) +} + +// ReadSymlinkedDirectory returns the target directory of a symlink. +// The target of the symbolic link may not be a file. +func ReadSymlinkedDirectory(path string) (string, error) { + var realPath string + var err error + if realPath, err = filepath.Abs(path); err != nil { + return "", fmt.Errorf("unable to get absolute path for %s: %s", path, err) + } + if realPath, err = filepath.EvalSymlinks(realPath); err != nil { + return "", fmt.Errorf("failed to canonicalise path for %s: %s", path, err) + } + realPathInfo, err := os.Stat(realPath) + if err != nil { + return "", fmt.Errorf("failed to stat target '%s' of '%s': %s", realPath, path, err) + } + if !realPathInfo.Mode().IsDir() { + return "", fmt.Errorf("canonical path points to a file '%s'", realPath) + } + return realPath, nil +} + +// CreateIfNotExists creates a file or a directory only if it does not already exist. +func CreateIfNotExists(path string, isDir bool) error { + if _, err := os.Stat(path); err != nil { + if os.IsNotExist(err) { + if isDir { + return os.MkdirAll(path, 0755) + } + if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { + return err + } + f, err := os.OpenFile(path, os.O_CREATE, 0755) + if err != nil { + return err + } + f.Close() + } + } + return nil +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/fileutils/fileutils_darwin.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/fileutils/fileutils_darwin.go new file mode 100644 index 000000000..e40cc271b --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/fileutils/fileutils_darwin.go @@ -0,0 +1,27 @@ +package fileutils // import "github.com/docker/docker/pkg/fileutils" + +import ( + "os" + "os/exec" + "strconv" + "strings" +) + +// GetTotalUsedFds returns the number of used File Descriptors by +// executing `lsof -p PID` +func GetTotalUsedFds() int { + pid := os.Getpid() + + cmd := exec.Command("lsof", "-p", strconv.Itoa(pid)) + + output, err := cmd.CombinedOutput() + if err != nil { + return -1 + } + + outputStr := strings.TrimSpace(string(output)) + + fds := strings.Split(outputStr, "\n") + + return len(fds) - 1 +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/fileutils/fileutils_unix.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/fileutils/fileutils_unix.go new file mode 100644 index 000000000..565396f1c --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/fileutils/fileutils_unix.go @@ -0,0 +1,22 @@ +// +build linux freebsd + +package fileutils // import "github.com/docker/docker/pkg/fileutils" + +import ( + "fmt" + "io/ioutil" + "os" + + "github.com/sirupsen/logrus" +) + +// GetTotalUsedFds Returns the number of used File Descriptors by +// reading it via /proc filesystem. 
+func GetTotalUsedFds() int { + if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil { + logrus.Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err) + } else { + return len(fds) + } + return -1 +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/fileutils/fileutils_windows.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/fileutils/fileutils_windows.go new file mode 100644 index 000000000..3f1ebb656 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/fileutils/fileutils_windows.go @@ -0,0 +1,7 @@ +package fileutils // import "github.com/docker/docker/pkg/fileutils" + +// GetTotalUsedFds Returns the number of used File Descriptors. Not supported +// on Windows. +func GetTotalUsedFds() int { + return -1 +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go index ee15ed52b..47ecd0c09 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go @@ -1,7 +1,10 @@ package homedir // import "github.com/docker/docker/pkg/homedir" import ( + "errors" "os" + "path/filepath" + "strings" "github.com/docker/docker/pkg/idtools" ) @@ -19,3 +22,88 @@ func GetStatic() (string, error) { } return usr.Home, nil } + +// GetRuntimeDir returns XDG_RUNTIME_DIR. +// XDG_RUNTIME_DIR is typically configured via pam_systemd. +// GetRuntimeDir returns non-nil error if XDG_RUNTIME_DIR is not set. +// +// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html +func GetRuntimeDir() (string, error) { + if xdgRuntimeDir := os.Getenv("XDG_RUNTIME_DIR"); xdgRuntimeDir != "" { + return xdgRuntimeDir, nil + } + return "", errors.New("could not get XDG_RUNTIME_DIR") +} + +// StickRuntimeDirContents sets the sticky bit on files that are under +// XDG_RUNTIME_DIR, so that the files won't be periodically removed by the system. +// +// StickyRuntimeDir returns slice of sticked files. +// StickyRuntimeDir returns nil error if XDG_RUNTIME_DIR is not set. +// +// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html +func StickRuntimeDirContents(files []string) ([]string, error) { + runtimeDir, err := GetRuntimeDir() + if err != nil { + // ignore error if runtimeDir is empty + return nil, nil + } + runtimeDir, err = filepath.Abs(runtimeDir) + if err != nil { + return nil, err + } + var sticked []string + for _, f := range files { + f, err = filepath.Abs(f) + if err != nil { + return sticked, err + } + if strings.HasPrefix(f, runtimeDir+"/") { + if err = stick(f); err != nil { + return sticked, err + } + sticked = append(sticked, f) + } + } + return sticked, nil +} + +func stick(f string) error { + st, err := os.Stat(f) + if err != nil { + return err + } + m := st.Mode() + m |= os.ModeSticky + return os.Chmod(f, m) +} + +// GetDataHome returns XDG_DATA_HOME. +// GetDataHome returns $HOME/.local/share and nil error if XDG_DATA_HOME is not set. +// +// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html +func GetDataHome() (string, error) { + if xdgDataHome := os.Getenv("XDG_DATA_HOME"); xdgDataHome != "" { + return xdgDataHome, nil + } + home := os.Getenv("HOME") + if home == "" { + return "", errors.New("could not get either XDG_DATA_HOME or HOME") + } + return filepath.Join(home, ".local", "share"), nil +} + +// GetConfigHome returns XDG_CONFIG_HOME. 
+// GetConfigHome returns $HOME/.config and nil error if XDG_CONFIG_HOME is not set. +// +// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html +func GetConfigHome() (string, error) { + if xdgConfigHome := os.Getenv("XDG_CONFIG_HOME"); xdgConfigHome != "" { + return xdgConfigHome, nil + } + home := os.Getenv("HOME") + if home == "" { + return "", errors.New("could not get either XDG_CONFIG_HOME or HOME") + } + return filepath.Join(home, ".config"), nil +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go index 75ada2fe5..f0a363ded 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go @@ -11,3 +11,23 @@ import ( func GetStatic() (string, error) { return "", errors.New("homedir.GetStatic() is not supported on this system") } + +// GetRuntimeDir is unsupported on non-linux system. +func GetRuntimeDir() (string, error) { + return "", errors.New("homedir.GetRuntimeDir() is not supported on this system") +} + +// StickRuntimeDirContents is unsupported on non-linux system. +func StickRuntimeDirContents(files []string) ([]string, error) { + return nil, errors.New("homedir.StickRuntimeDirContents() is not supported on this system") +} + +// GetDataHome is unsupported on non-linux system. +func GetDataHome() (string, error) { + return "", errors.New("homedir.GetDataHome() is not supported on this system") +} + +// GetConfigHome is unsupported on non-linux system. +func GetConfigHome() (string, error) { + return "", errors.New("homedir.GetConfigHome() is not supported on this system") +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/idtools/idtools.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/idtools/idtools.go index d1f173a31..b3af7a422 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/idtools/idtools.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/idtools/idtools.go @@ -4,7 +4,6 @@ import ( "bufio" "fmt" "os" - "sort" "strconv" "strings" ) @@ -37,23 +36,23 @@ const ( // MkdirAllAndChown creates a directory (include any along the path) and then modifies // ownership to the requested uid/gid. If the directory already exists, this // function will still change ownership to the requested uid/gid pair. -func MkdirAllAndChown(path string, mode os.FileMode, owner IDPair) error { - return mkdirAs(path, mode, owner.UID, owner.GID, true, true) +func MkdirAllAndChown(path string, mode os.FileMode, owner Identity) error { + return mkdirAs(path, mode, owner, true, true) } // MkdirAndChown creates a directory and then modifies ownership to the requested uid/gid. // If the directory already exists, this function still changes ownership. // Note that unlike os.Mkdir(), this function does not return IsExist error // in case path already exists. -func MkdirAndChown(path string, mode os.FileMode, owner IDPair) error { - return mkdirAs(path, mode, owner.UID, owner.GID, false, true) +func MkdirAndChown(path string, mode os.FileMode, owner Identity) error { + return mkdirAs(path, mode, owner, false, true) } // MkdirAllAndChownNew creates a directory (include any along the path) and then modifies // ownership ONLY of newly created directories to the requested uid/gid. 
If the // directories along the path exist, no change of ownership will be performed -func MkdirAllAndChownNew(path string, mode os.FileMode, owner IDPair) error { - return mkdirAs(path, mode, owner.UID, owner.GID, true, false) +func MkdirAllAndChownNew(path string, mode os.FileMode, owner Identity) error { + return mkdirAs(path, mode, owner, true, false) } // GetRootUIDGID retrieves the remapped root uid/gid pair from the set of maps. @@ -102,22 +101,23 @@ func toHost(contID int, idMap []IDMap) (int, error) { return -1, fmt.Errorf("Container ID %d cannot be mapped to a host ID", contID) } -// IDPair is a UID and GID pair -type IDPair struct { +// Identity is either a UID and GID pair or a SID (but not both) +type Identity struct { UID int GID int + SID string } -// IDMappings contains a mappings of UIDs and GIDs -type IDMappings struct { +// IdentityMapping contains a mappings of UIDs and GIDs +type IdentityMapping struct { uids []IDMap gids []IDMap } -// NewIDMappings takes a requested user and group name and +// NewIdentityMapping takes a requested user and group name and // using the data from /etc/sub{uid,gid} ranges, creates the // proper uid and gid remapping ranges for that user/group pair -func NewIDMappings(username, groupname string) (*IDMappings, error) { +func NewIdentityMapping(username, groupname string) (*IdentityMapping, error) { subuidRanges, err := parseSubuid(username) if err != nil { return nil, err @@ -133,7 +133,7 @@ func NewIDMappings(username, groupname string) (*IDMappings, error) { return nil, fmt.Errorf("No subgid ranges found for group %q", groupname) } - return &IDMappings{ + return &IdentityMapping{ uids: createIDMap(subuidRanges), gids: createIDMap(subgidRanges), }, nil @@ -141,21 +141,21 @@ func NewIDMappings(username, groupname string) (*IDMappings, error) { // NewIDMappingsFromMaps creates a new mapping from two slices // Deprecated: this is a temporary shim while transitioning to IDMapping -func NewIDMappingsFromMaps(uids []IDMap, gids []IDMap) *IDMappings { - return &IDMappings{uids: uids, gids: gids} +func NewIDMappingsFromMaps(uids []IDMap, gids []IDMap) *IdentityMapping { + return &IdentityMapping{uids: uids, gids: gids} } // RootPair returns a uid and gid pair for the root user. The error is ignored // because a root user always exists, and the defaults are correct when the uid // and gid maps are empty. -func (i *IDMappings) RootPair() IDPair { +func (i *IdentityMapping) RootPair() Identity { uid, gid, _ := GetRootUIDGID(i.uids, i.gids) - return IDPair{UID: uid, GID: gid} + return Identity{UID: uid, GID: gid} } // ToHost returns the host UID and GID for the container uid, gid. 
// Remapping is only performed if the ids aren't already the remapped root ids -func (i *IDMappings) ToHost(pair IDPair) (IDPair, error) { +func (i *IdentityMapping) ToHost(pair Identity) (Identity, error) { var err error target := i.RootPair() @@ -173,7 +173,7 @@ func (i *IDMappings) ToHost(pair IDPair) (IDPair, error) { } // ToContainer returns the container UID and GID for the host uid and gid -func (i *IDMappings) ToContainer(pair IDPair) (int, int, error) { +func (i *IdentityMapping) ToContainer(pair Identity) (int, int, error) { uid, err := toContainer(pair.UID, i.uids) if err != nil { return -1, -1, err @@ -183,27 +183,25 @@ func (i *IDMappings) ToContainer(pair IDPair) (int, int, error) { } // Empty returns true if there are no id mappings -func (i *IDMappings) Empty() bool { +func (i *IdentityMapping) Empty() bool { return len(i.uids) == 0 && len(i.gids) == 0 } // UIDs return the UID mapping // TODO: remove this once everything has been refactored to use pairs -func (i *IDMappings) UIDs() []IDMap { +func (i *IdentityMapping) UIDs() []IDMap { return i.uids } // GIDs return the UID mapping // TODO: remove this once everything has been refactored to use pairs -func (i *IDMappings) GIDs() []IDMap { +func (i *IdentityMapping) GIDs() []IDMap { return i.gids } func createIDMap(subidRanges ranges) []IDMap { idMap := []IDMap{} - // sort the ranges by lowest ID first - sort.Sort(subidRanges) containerID := 0 for _, idrange := range subidRanges { idMap = append(idMap, IDMap{ diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go index 1d87ea3bc..fb239743a 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go @@ -21,11 +21,12 @@ var ( getentCmd string ) -func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error { +func mkdirAs(path string, mode os.FileMode, owner Identity, mkAll, chownExisting bool) error { // make an array containing the original path asked for, plus (for mkAll == true) // all path components leading up to the complete path that don't exist before we MkdirAll // so that we can chown all of them properly at the end. 
If chownExisting is false, we won't + chown the full directory path if it exists + var paths []string stat, err := system.Stat(path) @@ -38,7 +39,7 @@ } // short-circuit--we were called with an existing directory and chown was requested - return lazyChown(path, ownerUID, ownerGID, stat) + return lazyChown(path, owner.UID, owner.GID, stat) } if os.IsNotExist(err) { @@ -69,7 +70,7 @@ // even if it existed, we will chown the requested path + any subpaths that // didn't exist when we called MkdirAll for _, pathComponent := range paths { - if err := lazyChown(pathComponent, ownerUID, ownerGID, nil); err != nil { + if err := lazyChown(pathComponent, owner.UID, owner.GID, nil); err != nil { return err } } @@ -78,7 +79,7 @@ // CanAccess takes a valid (existing) directory and a uid, gid pair and determines // if that uid, gid pair has access (execute bit) to the directory -func CanAccess(path string, pair IDPair) bool { +func CanAccess(path string, pair Identity) bool { statInfo, err := system.Stat(path) if err != nil { return false diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go index d72cc2892..4ae38a1b1 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go @@ -6,9 +6,11 @@ import ( "github.com/docker/docker/pkg/system" ) -// Platforms such as Windows do not support the UID/GID concept. So make this -// just a wrapper around system.MkdirAll. +// This is currently a wrapper around MkdirAll, however, since currently +// permissions aren't set through this path, the identity isn't utilized. +// Ownership is handled elsewhere, but in the future could be supported here +// too.
+func mkdirAs(path string, mode os.FileMode, owner Identity, mkAll, chownExisting bool) error { if err := system.MkdirAll(path, mode, ""); err != nil { return err } @@ -18,6 +20,6 @@ func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chown // CanAccess takes a valid (existing) directory and a uid, gid pair and determines // if that uid, gid pair has access (execute bit) to the directory // Windows does not require/support this function, so always return true -func CanAccess(path string, pair IDPair) bool { +func CanAccess(path string, identity Identity) bool { return true } diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go index dd95f3670..a68b566ce 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go @@ -4,13 +4,12 @@ import ( "encoding/json" "fmt" "io" - "os" "strings" "time" - "github.com/Nvveen/Gotty" "github.com/docker/docker/pkg/term" "github.com/docker/go-units" + "github.com/morikuni/aec" ) // RFC3339NanoFixed is time.RFC3339Nano with nanoseconds padded using zeros to @@ -151,60 +150,23 @@ type JSONMessage struct { Aux *json.RawMessage `json:"aux,omitempty"` } -/* Satisfied by gotty.TermInfo as well as noTermInfo from below */ -type termInfo interface { - Parse(attr string, params ...interface{}) (string, error) +func clearLine(out io.Writer) { + eraseMode := aec.EraseModes.All + cl := aec.EraseLine(eraseMode) + fmt.Fprint(out, cl) } -type noTermInfo struct{} // canary used when no terminfo. - -func (ti *noTermInfo) Parse(attr string, params ...interface{}) (string, error) { - return "", fmt.Errorf("noTermInfo") +func cursorUp(out io.Writer, l uint) { + fmt.Fprint(out, aec.Up(l)) } -func clearLine(out io.Writer, ti termInfo) { - // el2 (clear whole line) is not exposed by terminfo. - - // First clear line from beginning to cursor - if attr, err := ti.Parse("el1"); err == nil { - fmt.Fprintf(out, "%s", attr) - } else { - fmt.Fprintf(out, "\x1b[1K") - } - // Then clear line from cursor to end - if attr, err := ti.Parse("el"); err == nil { - fmt.Fprintf(out, "%s", attr) - } else { - fmt.Fprintf(out, "\x1b[K") - } +func cursorDown(out io.Writer, l uint) { + fmt.Fprint(out, aec.Down(l)) } -func cursorUp(out io.Writer, ti termInfo, l int) { - if l == 0 { // Should never be the case, but be tolerant - return - } - if attr, err := ti.Parse("cuu", l); err == nil { - fmt.Fprintf(out, "%s", attr) - } else { - fmt.Fprintf(out, "\x1b[%dA", l) - } -} - -func cursorDown(out io.Writer, ti termInfo, l int) { - if l == 0 { // Should never be the case, but be tolerant - return - } - if attr, err := ti.Parse("cud", l); err == nil { - fmt.Fprintf(out, "%s", attr) - } else { - fmt.Fprintf(out, "\x1b[%dB", l) - } -} - -// Display displays the JSONMessage to `out`. `termInfo` is non-nil if `out` -// is a terminal. If this is the case, it will erase the entire current line -// when displaying the progressbar. -func (jm *JSONMessage) Display(out io.Writer, termInfo termInfo) error { +// Display displays the JSONMessage to `out`. If `isTerminal` is true, it will erase the +// entire current line when displaying the progressbar. 
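Editor's note: the switch below from terminfo to github.com/morikuni/aec means cursor control is now plain ANSI; the three primitives the stream display relies on look like this in isolation (editorial sketch on a fake two-line status display):

```go
// Sketch: the aec primitives used by the JSON progress display.
package main

import (
	"fmt"
	"time"

	"github.com/morikuni/aec"
)

func main() {
	fmt.Println("layer1: downloading")
	fmt.Println("layer2: downloading")
	time.Sleep(500 * time.Millisecond)

	fmt.Print(aec.Up(2))                         // cursor up two lines
	fmt.Print(aec.EraseLine(aec.EraseModes.All)) // clear the whole line
	fmt.Print("layer1: done\r")                  // rewrite it in place
	fmt.Print(aec.Down(2))                       // back below the block
}
```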
+func (jm *JSONMessage) Display(out io.Writer, isTerminal bool) error { if jm.Error != nil { if jm.Error.Code == 401 { return fmt.Errorf("authentication is required") @@ -212,8 +174,8 @@ func (jm *JSONMessage) Display(out io.Writer, termInfo termInfo) error { return jm.Error } var endl string - if termInfo != nil && jm.Stream == "" && jm.Progress != nil { - clearLine(out, termInfo) + if isTerminal && jm.Stream == "" && jm.Progress != nil { + clearLine(out) endl = "\r" fmt.Fprintf(out, endl) } else if jm.Progress != nil && jm.Progress.String() != "" { //disable progressbar in non-terminal @@ -230,7 +192,7 @@ func (jm *JSONMessage) Display(out io.Writer, termInfo termInfo) error { if jm.From != "" { fmt.Fprintf(out, "(from %s) ", jm.From) } - if jm.Progress != nil && termInfo != nil { + if jm.Progress != nil && isTerminal { fmt.Fprintf(out, "%s %s%s", jm.Status, jm.Progress.String(), endl) } else if jm.ProgressMessage != "" { //deprecated fmt.Fprintf(out, "%s %s%s", jm.Status, jm.ProgressMessage, endl) @@ -248,25 +210,11 @@ func (jm *JSONMessage) Display(out io.Writer, termInfo termInfo) error { func DisplayJSONMessagesStream(in io.Reader, out io.Writer, terminalFd uintptr, isTerminal bool, auxCallback func(JSONMessage)) error { var ( dec = json.NewDecoder(in) - ids = make(map[string]int) + ids = make(map[string]uint) ) - var termInfo termInfo - - if isTerminal { - term := os.Getenv("TERM") - if term == "" { - term = "vt102" - } - - var err error - if termInfo, err = gotty.OpenTermInfo(term); err != nil { - termInfo = &noTermInfo{} - } - } - for { - diff := 0 + var diff uint var jm JSONMessage if err := dec.Decode(&jm); err != nil { if err == io.EOF { @@ -294,15 +242,15 @@ func DisplayJSONMessagesStream(in io.Reader, out io.Writer, terminalFd uintptr, // when we output something that's not // accounted for in the map, such as a line // with no ID. - line = len(ids) + line = uint(len(ids)) ids[jm.ID] = line - if termInfo != nil { + if isTerminal { fmt.Fprintf(out, "\n") } } - diff = len(ids) - line - if termInfo != nil { - cursorUp(out, termInfo, diff) + diff = uint(len(ids)) - line + if isTerminal { + cursorUp(out, diff) } } else { // When outputting something that isn't progress @@ -310,11 +258,11 @@ func DisplayJSONMessagesStream(in io.Reader, out io.Writer, terminalFd uintptr, // don't want progress entries from some previous // operation to be updated (for example, pull -a // with multiple tags). - ids = make(map[string]int) + ids = make(map[string]uint) } - err := jm.Display(out, termInfo) - if jm.ID != "" && termInfo != nil { - cursorDown(out, termInfo, diff) + err := jm.Display(out, isTerminal) + if jm.ID != "" && isTerminal { + cursorDown(out, diff) } if err != nil { return err diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/locker/README.md b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/locker/README.md new file mode 100644 index 000000000..ce787aefb --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/locker/README.md @@ -0,0 +1,65 @@ +Locker +===== + +locker provides a mechanism for creating finer-grained locking to help +free up more global locks to handle other tasks. + +The implementation looks close to a sync.Mutex, however, the user must provide a +reference to use to refer to the underlying lock when locking and unlocking, +and unlock may generate an error. + +If a lock with a given name does not exist when `Lock` is called, one is +created. 
+Lock references are automatically cleaned up on `Unlock` if nothing else is +waiting for the lock. + + +## Usage + +```go +package important + +import ( + "sync" + "time" + + "github.com/docker/docker/pkg/locker" +) + +type important struct { + locks *locker.Locker + data map[string]interface{} + mu sync.Mutex +} + +func (i *important) Get(name string) interface{} { + i.locks.Lock(name) + defer i.locks.Unlock(name) + return i.data[name] +} + +func (i *important) Create(name string, data interface{}) { + i.locks.Lock(name) + defer i.locks.Unlock(name) + + i.createImportant(data) + + i.mu.Lock() + i.data[name] = data + i.mu.Unlock() +} + +func (i *important) createImportant(data interface{}) { + time.Sleep(10 * time.Second) +} +``` + +For functions dealing with a given name, always lock at the beginning of the +function (or before doing anything with the underlying state), this ensures any +other function that is dealing with the same name will block. + +When needing to modify the underlying data, use the global lock to ensure nothing +else is modifying it at the same time. +Since name lock is already in place, no reads will occur while the modification +is being performed. + diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/locker/locker.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/locker/locker.go new file mode 100644 index 000000000..dbd47fc46 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/locker/locker.go @@ -0,0 +1,112 @@ +/* +Package locker provides a mechanism for creating finer-grained locking to help +free up more global locks to handle other tasks. + +The implementation looks close to a sync.Mutex, however the user must provide a +reference to use to refer to the underlying lock when locking and unlocking, +and unlock may generate an error. + +If a lock with a given name does not exist when `Lock` is called, one is +created. +Lock references are automatically cleaned up on `Unlock` if nothing else is +waiting for the lock. +*/ +package locker // import "github.com/docker/docker/pkg/locker" + +import ( + "errors" + "sync" + "sync/atomic" +) + +// ErrNoSuchLock is returned when the requested lock does not exist +var ErrNoSuchLock = errors.New("no such lock") + +// Locker provides a locking mechanism based on the passed in reference name +type Locker struct { + mu sync.Mutex + locks map[string]*lockCtr +} + +// lockCtr is used by Locker to represent a lock with a given name. +type lockCtr struct { + mu sync.Mutex + // waiters is the number of waiters waiting to acquire the lock + // this is int32 instead of uint32 so we can add `-1` in `dec()` + waiters int32 +} + +// inc increments the number of waiters waiting for the lock +func (l *lockCtr) inc() { + atomic.AddInt32(&l.waiters, 1) +} + +// dec decrements the number of waiters waiting on the lock +func (l *lockCtr) dec() { + atomic.AddInt32(&l.waiters, -1) +} + +// count gets the current number of waiters +func (l *lockCtr) count() int32 { + return atomic.LoadInt32(&l.waiters) +} + +// Lock locks the mutex +func (l *lockCtr) Lock() { + l.mu.Lock() +} + +// Unlock unlocks the mutex +func (l *lockCtr) Unlock() { + l.mu.Unlock() +} + +// New creates a new Locker +func New() *Locker { + return &Locker{ + locks: make(map[string]*lockCtr), + } +} + +// Lock locks a mutex with the given name. 
If it doesn't exist, one is created +func (l *Locker) Lock(name string) { + l.mu.Lock() + if l.locks == nil { + l.locks = make(map[string]*lockCtr) + } + + nameLock, exists := l.locks[name] + if !exists { + nameLock = &lockCtr{} + l.locks[name] = nameLock + } + + // increment the nameLock waiters while inside the main mutex + // this makes sure that the lock isn't deleted if `Lock` and `Unlock` are called concurrently + nameLock.inc() + l.mu.Unlock() + + // Lock the nameLock outside the main mutex so we don't block other operations + // once locked then we can decrement the number of waiters for this lock + nameLock.Lock() + nameLock.dec() +} + +// Unlock unlocks the mutex with the given name +// If the given lock is not being waited on by any other callers, it is deleted +func (l *Locker) Unlock(name string) error { + l.mu.Lock() + nameLock, exists := l.locks[name] + if !exists { + l.mu.Unlock() + return ErrNoSuchLock + } + + if nameLock.count() == 0 { + delete(l.locks, name) + } + nameLock.Unlock() + + l.mu.Unlock() + return nil +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/mount/flags.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/mount/flags.go index 272363b68..ffd473311 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/mount/flags.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/mount/flags.go @@ -135,15 +135,3 @@ func parseOptions(options string) (int, string) { } return flag, strings.Join(data, ",") } - -// ParseTmpfsOptions parse fstab type mount options into flags and data -func ParseTmpfsOptions(options string) (int, string, error) { - flags, data := parseOptions(options) - for _, o := range strings.Split(data, ",") { - opt := strings.SplitN(o, "=", 2) - if !validFlags[opt[0]] { - return 0, "", fmt.Errorf("Invalid tmpfs option %q", opt) - } - } - return flags, data, nil -} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/mount/mount.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/mount/mount.go index 874aff654..be0631c63 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/mount/mount.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/mount/mount.go @@ -2,12 +2,46 @@ package mount // import "github.com/docker/docker/pkg/mount" import ( "sort" + "strconv" "strings" - "syscall" "github.com/sirupsen/logrus" ) +// mountError records an error from mount or unmount operation +type mountError struct { + op string + source, target string + flags uintptr + data string + err error +} + +func (e *mountError) Error() string { + out := e.op + " " + + if e.source != "" { + out += e.source + ":" + e.target + } else { + out += e.target + } + + if e.flags != uintptr(0) { + out += ", flags: 0x" + strconv.FormatUint(uint64(e.flags), 16) + } + if e.data != "" { + out += ", data: " + e.data + } + + out += ": " + e.err.Error() + return out +} + +// Cause returns the underlying cause of the error +func (e *mountError) Cause() error { + return e.err +} + // FilterFunc is a type defining a callback function // to filter out unwanted entries. It takes a pointer // to an Info struct (not fully populated, currently @@ -68,13 +102,13 @@ func Mounted(mountpoint string) (bool, error) { // specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See // flags.go for supported option flags. 
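Editor's note: the fstab-style options string mentioned above, together with the structured mountError defined just below, makes failures much easier to read. A usage sketch mirroring the rbind call used elsewhere in this patch (paths illustrative):

```go
// Sketch: a recursive read-only bind mount via pkg/mount. Options are
// parsed like an fstab field; failures return the structured mountError.
package main

import (
	"log"

	"github.com/docker/docker/pkg/mount"
)

func main() {
	// Roughly: mount --rbind -o ro /src /dst (illustrative paths).
	if err := mount.Mount("/src", "/dst", "bind", "rbind,ro"); err != nil {
		// e.g. "mount /src:/dst, flags: 0x5001: operation not permitted"
		log.Fatal(err)
	}
}
```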
func Mount(device, target, mType, options string) error { - flag, _ := parseOptions(options) + flag, data := parseOptions(options) if flag&REMOUNT != REMOUNT { if mounted, err := Mounted(target); err != nil || mounted { return err } } - return ForceMount(device, target, mType, options) + return mount(device, target, mType, uintptr(flag), data) } // ForceMount will mount a filesystem according to the specified configuration, @@ -89,12 +123,7 @@ func ForceMount(device, target, mType, options string) error { // Unmount lazily unmounts a filesystem on supported platforms, otherwise // does a normal unmount. func Unmount(target string) error { - err := unmount(target, mntDetach) - if err == syscall.EINVAL { - // ignore "not mounted" error - err = nil - } - return err + return unmount(target, mntDetach) } // RecursiveUnmount unmounts the target and all mounts underneath, starting with @@ -114,25 +143,14 @@ func RecursiveUnmount(target string) error { logrus.Debugf("Trying to unmount %s", m.Mountpoint) err = unmount(m.Mountpoint, mntDetach) if err != nil { - // If the error is EINVAL either this whole package is wrong (invalid flags passed to unmount(2)) or this is - // not a mountpoint (which is ok in this case). - // Meanwhile calling `Mounted()` is very expensive. - // - // We've purposefully used `syscall.EINVAL` here instead of `unix.EINVAL` to avoid platform branching - // Since `EINVAL` is defined for both Windows and Linux in the `syscall` package (and other platforms), - // this is nicer than defining a custom value that we can refer to in each platform file. - if err == syscall.EINVAL { - continue - } - if i == len(mounts)-1 { + if i == len(mounts)-1 { // last mount if mounted, e := Mounted(m.Mountpoint); e != nil || mounted { return err } - continue + } else { + // This is some submount, we can ignore this error for now, the final unmount will fail if this is a real problem + logrus.WithError(err).Warnf("Failed to unmount submount %s", m.Mountpoint) } - // This is some submount, we can ignore this error for now, the final unmount will fail if this is a real problem - logrus.WithError(err).Warnf("Failed to unmount submount %s", m.Mountpoint) - continue } logrus.Debugf("Unmounted %s", m.Mountpoint) diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/mount/mounter_freebsd.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/mount/mounter_freebsd.go index b6ab83a23..09ad36060 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/mount/mounter_freebsd.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/mount/mounter_freebsd.go @@ -11,11 +11,9 @@ package mount // import "github.com/docker/docker/pkg/mount" import "C" import ( - "fmt" "strings" + "syscall" "unsafe" - - "golang.org/x/sys/unix" ) func allocateIOVecs(options []string) []C.struct_iovec { @@ -49,12 +47,13 @@ func mount(device, target, mType string, flag uintptr, data string) error { } if errno := C.nmount(&rawOptions[0], C.uint(len(options)), C.int(flag)); errno != 0 { - reason := C.GoString(C.strerror(*C.__error())) - return fmt.Errorf("Failed to call nmount: %s", reason) + return &mountError{ + op: "mount", + source: device, + target: target, + flags: flag, + err: syscall.Errno(errno), + } } return nil } - -func unmount(target string, flag int) error { - return unix.Unmount(target, flag) -} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/mount/mounter_linux.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/mount/mounter_linux.go index 631daf10a..a0a1ad236 100644 
--- a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/mount/mounter_linux.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/mount/mounter_linux.go @@ -33,25 +33,41 @@ func mount(device, target, mType string, flags uintptr, data string) error { // Initial call applying all non-propagation flags for mount // or remount with changed data if err := unix.Mount(device, target, mType, oflags, data); err != nil { - return err + return &mountError{ + op: "mount", + source: device, + target: target, + flags: oflags, + data: data, + err: err, + } } } if flags&ptypes != 0 { // Change the propagation type. if err := unix.Mount("", target, "", flags&pflags, ""); err != nil { - return err + return &mountError{ + op: "remount", + target: target, + flags: flags & pflags, + err: err, + } } } if oflags&broflags == broflags { // Remount the bind to apply read only. - return unix.Mount("", target, "", oflags|unix.MS_REMOUNT, "") + if err := unix.Mount("", target, "", oflags|unix.MS_REMOUNT, ""); err != nil { + return &mountError{ + op: "remount-ro", + target: target, + flags: oflags | unix.MS_REMOUNT, + err: err, + } + + } } return nil } - -func unmount(target string, flag int) error { - return unix.Unmount(target, flag) -} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/mount/mounter_unsupported.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/mount/mounter_unsupported.go index 1428dffa5..c3e5aec27 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/mount/mounter_unsupported.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/mount/mounter_unsupported.go @@ -5,7 +5,3 @@ package mount // import "github.com/docker/docker/pkg/mount" func mount(device, target, mType string, flag uintptr, data string) error { panic("Not implemented") } - -func unmount(target string, flag int) error { - panic("Not implemented") -} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux.go index c1dba01fc..fe6e3ddba 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux.go @@ -7,16 +7,21 @@ import ( "os" "strconv" "strings" + + "github.com/pkg/errors" ) func parseInfoFile(r io.Reader, filter FilterFunc) ([]*Info, error) { s := bufio.NewScanner(r) out := []*Info{} + var err error for s.Scan() { - if err := s.Err(); err != nil { + if err = s.Err(); err != nil { return nil, err } /* + See http://man7.org/linux/man-pages/man5/proc.5.html + 36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue (1)(2)(3) (4) (5) (6) (7) (8) (9) (10) (11) @@ -52,8 +57,15 @@ func parseInfoFile(r io.Reader, filter FilterFunc) ([]*Info, error) { p.Major, _ = strconv.Atoi(mm[0]) p.Minor, _ = strconv.Atoi(mm[1]) - p.Root = fields[3] - p.Mountpoint = fields[4] + p.Root, err = strconv.Unquote(`"` + fields[3] + `"`) + if err != nil { + return nil, errors.Wrapf(err, "Parsing '%s' failed: unable to unquote root field", fields[3]) + } + + p.Mountpoint, err = strconv.Unquote(`"` + fields[4] + `"`) + if err != nil { + return nil, errors.Wrapf(err, "Parsing '%s' failed: unable to unquote mount point field", fields[4]) + } p.Opts = fields[5] var skip, stop bool diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go index 
538f6637a..db3882874 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go @@ -3,65 +3,69 @@ package mount // import "github.com/docker/docker/pkg/mount" // MakeShared ensures a mounted filesystem has the SHARED mount option enabled. // See the supported options in flags.go for further reference. func MakeShared(mountPoint string) error { - return ensureMountedAs(mountPoint, "shared") + return ensureMountedAs(mountPoint, SHARED) } // MakeRShared ensures a mounted filesystem has the RSHARED mount option enabled. // See the supported options in flags.go for further reference. func MakeRShared(mountPoint string) error { - return ensureMountedAs(mountPoint, "rshared") + return ensureMountedAs(mountPoint, RSHARED) } // MakePrivate ensures a mounted filesystem has the PRIVATE mount option enabled. // See the supported options in flags.go for further reference. func MakePrivate(mountPoint string) error { - return ensureMountedAs(mountPoint, "private") + return ensureMountedAs(mountPoint, PRIVATE) } // MakeRPrivate ensures a mounted filesystem has the RPRIVATE mount option // enabled. See the supported options in flags.go for further reference. func MakeRPrivate(mountPoint string) error { - return ensureMountedAs(mountPoint, "rprivate") + return ensureMountedAs(mountPoint, RPRIVATE) } // MakeSlave ensures a mounted filesystem has the SLAVE mount option enabled. // See the supported options in flags.go for further reference. func MakeSlave(mountPoint string) error { - return ensureMountedAs(mountPoint, "slave") + return ensureMountedAs(mountPoint, SLAVE) } // MakeRSlave ensures a mounted filesystem has the RSLAVE mount option enabled. // See the supported options in flags.go for further reference. func MakeRSlave(mountPoint string) error { - return ensureMountedAs(mountPoint, "rslave") + return ensureMountedAs(mountPoint, RSLAVE) } // MakeUnbindable ensures a mounted filesystem has the UNBINDABLE mount option // enabled. See the supported options in flags.go for further reference. func MakeUnbindable(mountPoint string) error { - return ensureMountedAs(mountPoint, "unbindable") + return ensureMountedAs(mountPoint, UNBINDABLE) } // MakeRUnbindable ensures a mounted filesystem has the RUNBINDABLE mount // option enabled. See the supported options in flags.go for further reference. func MakeRUnbindable(mountPoint string) error { - return ensureMountedAs(mountPoint, "runbindable") + return ensureMountedAs(mountPoint, RUNBINDABLE) } -func ensureMountedAs(mountPoint, options string) error { - mounted, err := Mounted(mountPoint) +// MakeMount ensures that the file or directory given is a mount point, +// bind mounting it to itself in case it is not.
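+//
+// A minimal usage sketch (editor's illustration, not part of the vendored
+// source; the path is hypothetical):
+//
+//	if err := mount.MakeMount("/var/lib/mytarget"); err != nil {
+//		log.Fatal(err)
+//	}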
+func MakeMount(mnt string) error { + mounted, err := Mounted(mnt) if err != nil { return err } - - if !mounted { - if err := Mount(mountPoint, mountPoint, "none", "bind,rw"); err != nil { - return err - } + if mounted { + return nil } - if _, err = Mounted(mountPoint); err != nil { + + return mount(mnt, mnt, "none", uintptr(BIND), "") +} + +func ensureMountedAs(mnt string, flags int) error { + if err := MakeMount(mnt); err != nil { return err } - return ForceMount("", mountPoint, "none", options) + return mount("", mnt, "none", uintptr(flags), "") } diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/mount/unmount_unix.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/mount/unmount_unix.go new file mode 100644 index 000000000..4be427685 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/mount/unmount_unix.go @@ -0,0 +1,22 @@ +// +build !windows + +package mount // import "github.com/docker/docker/pkg/mount" + +import "golang.org/x/sys/unix" + +func unmount(target string, flags int) error { + err := unix.Unmount(target, flags) + if err == nil || err == unix.EINVAL { + // Ignore "not mounted" error here. Note the same error + // can be returned if flags are invalid, so this code + // assumes that the flags value is always correct. + return nil + } + + return &mountError{ + op: "umount", + target: target, + flags: uintptr(flags), + err: err, + } +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/mount/unmount_unsupported.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/mount/unmount_unsupported.go new file mode 100644 index 000000000..a88ad3577 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/mount/unmount_unsupported.go @@ -0,0 +1,7 @@ +// +build windows + +package mount // import "github.com/docker/docker/pkg/mount" + +func unmount(target string, flag int) error { + panic("Not implemented") +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel.go new file mode 100644 index 000000000..94780ef61 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel.go @@ -0,0 +1,74 @@ +// +build !windows + +// Package kernel provides helper functions to get, parse and compare kernel +// versions for different platforms. +package kernel // import "github.com/docker/docker/pkg/parsers/kernel" + +import ( + "errors" + "fmt" +) + +// VersionInfo holds information about the kernel. +type VersionInfo struct { + Kernel int // Version of the kernel (e.g. 4.1.2-generic -> 4) + Major int // Major part of the kernel version (e.g. 4.1.2-generic -> 1) + Minor int // Minor part of the kernel version (e.g. 4.1.2-generic -> 2) + Flavor string // Flavor of the kernel version (e.g. 4.1.2-generic -> generic) +} + +func (k *VersionInfo) String() string { + return fmt.Sprintf("%d.%d.%d%s", k.Kernel, k.Major, k.Minor, k.Flavor) +} + +// CompareKernelVersion compares two kernel.VersionInfo structs. +// Returns -1 if a < b, 0 if a == b, 1 if a > b +func CompareKernelVersion(a, b VersionInfo) int { + if a.Kernel < b.Kernel { + return -1 + } else if a.Kernel > b.Kernel { + return 1 + } + + if a.Major < b.Major { + return -1 + } else if a.Major > b.Major { + return 1 + } + + if a.Minor < b.Minor { + return -1 + } else if a.Minor > b.Minor { + return 1 + } + + return 0 +} + +// ParseRelease parses a string and creates a VersionInfo based on it.
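+// For instance (editor's note): ParseRelease("4.1.2-generic") yields
+// VersionInfo{Kernel: 4, Major: 1, Minor: 2, Flavor: "-generic"}; the flavor
+// keeps its leading separator because it is simply whatever trails the minor
+// number in the two Sscanf passes below.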
+func ParseRelease(release string) (*VersionInfo, error) { + var ( + kernel, major, minor, parsed int + flavor, partial string + ) + + // Ignore error from Sscanf to allow an empty flavor. Instead, just + // make sure we got all the version numbers. + parsed, _ = fmt.Sscanf(release, "%d.%d%s", &kernel, &major, &partial) + if parsed < 2 { + return nil, errors.New("Can't parse kernel version " + release) + } + + // sometimes we have 3.12.25-gentoo, but sometimes we just have 3.12-1-amd64 + parsed, _ = fmt.Sscanf(partial, ".%d%s", &minor, &flavor) + if parsed < 1 { + flavor = partial + } + + return &VersionInfo{ + Kernel: kernel, + Major: major, + Minor: minor, + Flavor: flavor, + }, nil +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_darwin.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_darwin.go new file mode 100644 index 000000000..6e599eebc --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_darwin.go @@ -0,0 +1,56 @@ +// +build darwin + +// Package kernel provides helper functions to get, parse and compare kernel +// versions for different platforms. +package kernel // import "github.com/docker/docker/pkg/parsers/kernel" + +import ( + "fmt" + "os/exec" + "strings" + + "github.com/mattn/go-shellwords" +) + +// GetKernelVersion gets the current kernel version. +func GetKernelVersion() (*VersionInfo, error) { + release, err := getRelease() + if err != nil { + return nil, err + } + + return ParseRelease(release) +} + +// getRelease uses `system_profiler SPSoftwareDataType` to get OSX kernel version +func getRelease() (string, error) { + cmd := exec.Command("system_profiler", "SPSoftwareDataType") + osName, err := cmd.Output() + if err != nil { + return "", err + } + + var release string + data := strings.Split(string(osName), "\n") + for _, line := range data { + if strings.Contains(line, "Kernel Version") { + // It has the format like ' Kernel Version: Darwin 14.5.0' + content := strings.SplitN(line, ":", 2) + if len(content) != 2 { + return "", fmt.Errorf("Kernel Version is invalid") + } + + prettyNames, err := shellwords.Parse(content[1]) + if err != nil { + return "", fmt.Errorf("Kernel Version is invalid: %s", err.Error()) + } + + if len(prettyNames) != 2 { + return "", fmt.Errorf("Kernel Version needs to be 'Darwin x.x.x' ") + } + release = prettyNames[1] + } + } + + return release, nil +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_unix.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_unix.go new file mode 100644 index 000000000..8a9aa3122 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_unix.go @@ -0,0 +1,35 @@ +// +build linux freebsd openbsd + +// Package kernel provides helper functions to get, parse and compare kernel +// versions for different platforms. +package kernel // import "github.com/docker/docker/pkg/parsers/kernel" + +import ( + "bytes" + + "github.com/sirupsen/logrus" +) + +// GetKernelVersion gets the current kernel version. +func GetKernelVersion() (*VersionInfo, error) { + uts, err := uname() + if err != nil { + return nil, err + } + + // Remove the \x00 from the release for Atoi to parse correctly + return ParseRelease(string(uts.Release[:bytes.IndexByte(uts.Release[:], 0)])) +} + +// CheckKernelVersion checks if current kernel is newer than (or equal to) +// the given version.
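+//
+// A hedged usage sketch (editor's illustration; enableFeature is a
+// hypothetical caller-side helper):
+//
+//	if kernel.CheckKernelVersion(4, 9, 0) {
+//		enableFeature()
+//	}
+//
+// Note that when the version lookup itself fails this function logs a
+// warning and reports true, i.e. it fails open.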
+func CheckKernelVersion(k, major, minor int) bool { + if v, err := GetKernelVersion(); err != nil { + logrus.Warnf("error getting kernel version: %s", err) + } else { + if CompareKernelVersion(*v, VersionInfo{Kernel: k, Major: major, Minor: minor}) < 0 { + return false + } + } + return true +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_windows.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_windows.go new file mode 100644 index 000000000..b7b15a1fd --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_windows.go @@ -0,0 +1,51 @@ +package kernel // import "github.com/docker/docker/pkg/parsers/kernel" + +import ( + "fmt" + + "golang.org/x/sys/windows" + "golang.org/x/sys/windows/registry" +) + +// VersionInfo holds information about the kernel. +type VersionInfo struct { + kvi string // Version of the kernel (e.g. 6.1.7601.17592 -> 6) + major int // Major part of the kernel version (e.g. 6.1.7601.17592 -> 1) + minor int // Minor part of the kernel version (e.g. 6.1.7601.17592 -> 7601) + build int // Build number of the kernel version (e.g. 6.1.7601.17592 -> 17592) +} + +func (k *VersionInfo) String() string { + return fmt.Sprintf("%d.%d %d (%s)", k.major, k.minor, k.build, k.kvi) +} + +// GetKernelVersion gets the current kernel version. +func GetKernelVersion() (*VersionInfo, error) { + + KVI := &VersionInfo{"Unknown", 0, 0, 0} + + k, err := registry.OpenKey(registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE) + if err != nil { + return KVI, err + } + defer k.Close() + + blex, _, err := k.GetStringValue("BuildLabEx") + if err != nil { + return KVI, err + } + KVI.kvi = blex + + // Important - docker.exe MUST be manifested for this API to return + // the correct information. + dwVersion, err := windows.GetVersion() + if err != nil { + return KVI, err + } + + KVI.major = int(dwVersion & 0xFF) + KVI.minor = int((dwVersion & 0XFF00) >> 8) + KVI.build = int((dwVersion & 0xFFFF0000) >> 16) + + return KVI, nil +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_linux.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_linux.go new file mode 100644 index 000000000..212ff4502 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_linux.go @@ -0,0 +1,17 @@ +package kernel // import "github.com/docker/docker/pkg/parsers/kernel" + +import "golang.org/x/sys/unix" + +// Utsname represents the system name structure. +// It is passthrough for unix.Utsname in order to make it portable with +// other platforms where it is not available. 
+type Utsname unix.Utsname + +func uname() (*unix.Utsname, error) { + uts := &unix.Utsname{} + + if err := unix.Uname(uts); err != nil { + return nil, err + } + return uts, nil +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_solaris.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_solaris.go new file mode 100644 index 000000000..b2139b60e --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_solaris.go @@ -0,0 +1,14 @@ +package kernel // import "github.com/docker/docker/pkg/parsers/kernel" + +import ( + "golang.org/x/sys/unix" +) + +func uname() (*unix.Utsname, error) { + uts := &unix.Utsname{} + + if err := unix.Uname(uts); err != nil { + return nil, err + } + return uts, nil +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_unsupported.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_unsupported.go new file mode 100644 index 000000000..97906e4cd --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_unsupported.go @@ -0,0 +1,18 @@ +// +build !linux + +package kernel // import "github.com/docker/docker/pkg/parsers/kernel" + +import ( + "errors" +) + +// Utsname represents the system name structure. +// It is defined here to make it portable as it is available on linux but not +// on windows. +type Utsname struct { + Release [65]byte +} + +func uname() (*Utsname, error) { + return nil, errors.New("Kernel version detection is available only on linux") +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/plugingetter/getter.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/plugingetter/getter.go new file mode 100644 index 000000000..370e0d5b9 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/plugingetter/getter.go @@ -0,0 +1,52 @@ +package plugingetter // import "github.com/docker/docker/pkg/plugingetter" + +import ( + "net" + "time" + + "github.com/docker/docker/pkg/plugins" +) + +const ( + // Lookup doesn't update RefCount + Lookup = 0 + // Acquire increments RefCount + Acquire = 1 + // Release decrements RefCount + Release = -1 +) + +// CompatPlugin is an abstraction to handle both v2(new) and v1(legacy) plugins. +type CompatPlugin interface { + Name() string + ScopedPath(string) string + IsV1() bool + PluginWithV1Client +} + +// PluginWithV1Client is a plugin that directly utilizes the v1/http plugin client +type PluginWithV1Client interface { + Client() *plugins.Client +} + +// PluginAddr is a plugin that exposes the socket address for creating custom clients rather than the built-in `*plugins.Client` +type PluginAddr interface { + Addr() net.Addr + Timeout() time.Duration + Protocol() string +} + +// CountedPlugin is a plugin which is reference counted. 
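+//
+// A typical pattern (editor's sketch, not from the vendored source) is to
+// hold a reference for the duration of use:
+//
+//	if cp, ok := p.(CountedPlugin); ok {
+//		cp.Acquire()
+//		defer cp.Release()
+//	}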
+type CountedPlugin interface { + Acquire() + Release() + CompatPlugin +} + +// PluginGetter is the interface implemented by Store +type PluginGetter interface { + Get(name, capability string, mode int) (CompatPlugin, error) + GetAllByCap(capability string) ([]CompatPlugin, error) + GetAllManagedPluginsByCap(capability string) []CompatPlugin + Handle(capability string, callback func(string, *plugins.Client)) +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/plugins/client.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/plugins/client.go new file mode 100644 index 000000000..035330535 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/plugins/client.go @@ -0,0 +1,242 @@ +package plugins // import "github.com/docker/docker/pkg/plugins" + +import ( + "bytes" + "context" + "encoding/json" + "io" + "io/ioutil" + "net/http" + "net/url" + "time" + + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/plugins/transport" + "github.com/docker/go-connections/sockets" + "github.com/docker/go-connections/tlsconfig" + "github.com/sirupsen/logrus" +) + +const ( + defaultTimeOut = 30 +) + +func newTransport(addr string, tlsConfig *tlsconfig.Options) (transport.Transport, error) { + tr := &http.Transport{} + + if tlsConfig != nil { + c, err := tlsconfig.Client(*tlsConfig) + if err != nil { + return nil, err + } + tr.TLSClientConfig = c + } + + u, err := url.Parse(addr) + if err != nil { + return nil, err + } + socket := u.Host + if socket == "" { + // valid local socket addresses have the host empty. + socket = u.Path + } + if err := sockets.ConfigureTransport(tr, u.Scheme, socket); err != nil { + return nil, err + } + scheme := httpScheme(u) + + return transport.NewHTTPTransport(tr, scheme, socket), nil +} + +// NewClient creates a new plugin client (http). +func NewClient(addr string, tlsConfig *tlsconfig.Options) (*Client, error) { + clientTransport, err := newTransport(addr, tlsConfig) + if err != nil { + return nil, err + } + return newClientWithTransport(clientTransport, 0), nil +} + +// NewClientWithTimeout creates a new plugin client (http). +func NewClientWithTimeout(addr string, tlsConfig *tlsconfig.Options, timeout time.Duration) (*Client, error) { + clientTransport, err := newTransport(addr, tlsConfig) + if err != nil { + return nil, err + } + return newClientWithTransport(clientTransport, timeout), nil +} + +// newClientWithTransport creates a new plugin client with a given transport. +func newClientWithTransport(tr transport.Transport, timeout time.Duration) *Client { + return &Client{ + http: &http.Client{ + Transport: tr, + Timeout: timeout, + }, + requestFactory: tr, + } +} + +// Client represents a plugin client. +type Client struct { + http *http.Client // http client to use + requestFactory transport.RequestFactory +} + +// RequestOpts is the set of options that can be passed into a request +type RequestOpts struct { + Timeout time.Duration +} + +// WithRequestTimeout sets a timeout duration for plugin requests +func WithRequestTimeout(t time.Duration) func(*RequestOpts) { + return func(o *RequestOpts) { + o.Timeout = t + } +} + +// Call calls the specified method with the specified arguments for the plugin. +// It will retry for 30 seconds if a failure occurs when calling. 
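+//
+// Sketch of a call (editor's illustration; the method name mirrors the
+// activation handshake used elsewhere in this package):
+//
+//	var m Manifest
+//	if err := c.Call("Plugin.Activate", nil, &m); err != nil {
+//		return err
+//	}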
+func (c *Client) Call(serviceMethod string, args, ret interface{}) error { + return c.CallWithOptions(serviceMethod, args, ret) +} + +// CallWithOptions is just like call except it takes options +func (c *Client) CallWithOptions(serviceMethod string, args interface{}, ret interface{}, opts ...func(*RequestOpts)) error { + var buf bytes.Buffer + if args != nil { + if err := json.NewEncoder(&buf).Encode(args); err != nil { + return err + } + } + body, err := c.callWithRetry(serviceMethod, &buf, true, opts...) + if err != nil { + return err + } + defer body.Close() + if ret != nil { + if err := json.NewDecoder(body).Decode(&ret); err != nil { + logrus.Errorf("%s: error reading plugin resp: %v", serviceMethod, err) + return err + } + } + return nil +} + +// Stream calls the specified method with the specified arguments for the plugin and returns the response body +func (c *Client) Stream(serviceMethod string, args interface{}) (io.ReadCloser, error) { + var buf bytes.Buffer + if err := json.NewEncoder(&buf).Encode(args); err != nil { + return nil, err + } + return c.callWithRetry(serviceMethod, &buf, true) +} + +// SendFile calls the specified method, and passes through the IO stream +func (c *Client) SendFile(serviceMethod string, data io.Reader, ret interface{}) error { + body, err := c.callWithRetry(serviceMethod, data, true) + if err != nil { + return err + } + defer body.Close() + if err := json.NewDecoder(body).Decode(&ret); err != nil { + logrus.Errorf("%s: error reading plugin resp: %v", serviceMethod, err) + return err + } + return nil +} + +func (c *Client) callWithRetry(serviceMethod string, data io.Reader, retry bool, reqOpts ...func(*RequestOpts)) (io.ReadCloser, error) { + var retries int + start := time.Now() + + var opts RequestOpts + for _, o := range reqOpts { + o(&opts) + } + + for { + req, err := c.requestFactory.NewRequest(serviceMethod, data) + if err != nil { + return nil, err + } + + cancelRequest := func() {} + if opts.Timeout > 0 { + var ctx context.Context + ctx, cancelRequest = context.WithTimeout(req.Context(), opts.Timeout) + req = req.WithContext(ctx) + } + + resp, err := c.http.Do(req) + if err != nil { + cancelRequest() + if !retry { + return nil, err + } + + timeOff := backoff(retries) + if abort(start, timeOff) { + return nil, err + } + retries++ + logrus.Warnf("Unable to connect to plugin: %s%s: %v, retrying in %v", req.URL.Host, req.URL.Path, err, timeOff) + time.Sleep(timeOff) + continue + } + + if resp.StatusCode != http.StatusOK { + b, err := ioutil.ReadAll(resp.Body) + resp.Body.Close() + cancelRequest() + if err != nil { + return nil, &statusError{resp.StatusCode, serviceMethod, err.Error()} + } + + // Plugins' Response(s) should have an Err field indicating what went + // wrong. Try to unmarshal into ResponseErr. Otherwise fallback to just + // return the string(body) + type responseErr struct { + Err string + } + remoteErr := responseErr{} + if err := json.Unmarshal(b, &remoteErr); err == nil { + if remoteErr.Err != "" { + return nil, &statusError{resp.StatusCode, serviceMethod, remoteErr.Err} + } + } + // old way... 
+ return nil, &statusError{resp.StatusCode, serviceMethod, string(b)} + } + return ioutils.NewReadCloserWrapper(resp.Body, func() error { + err := resp.Body.Close() + cancelRequest() + return err + }), nil + } +} + +func backoff(retries int) time.Duration { + b, max := 1, defaultTimeOut + for b < max && retries > 0 { + b *= 2 + retries-- + } + if b > max { + b = max + } + return time.Duration(b) * time.Second +} + +func abort(start time.Time, timeOff time.Duration) bool { + return timeOff+time.Since(start) >= time.Duration(defaultTimeOut)*time.Second +} + +func httpScheme(u *url.URL) string { + scheme := u.Scheme + if scheme != "https" { + scheme = "http" + } + return scheme +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/plugins/discovery.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/plugins/discovery.go new file mode 100644 index 000000000..4b79bd29a --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/plugins/discovery.go @@ -0,0 +1,154 @@ +package plugins // import "github.com/docker/docker/pkg/plugins" + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/url" + "os" + "path/filepath" + "strings" + "sync" + + "github.com/pkg/errors" +) + +var ( + // ErrNotFound plugin not found + ErrNotFound = errors.New("plugin not found") + socketsPath = "/run/docker/plugins" +) + +// localRegistry defines a registry that is local (using unix socket). +type localRegistry struct{} + +func newLocalRegistry() localRegistry { + return localRegistry{} +} + +// Scan scans all the plugin paths and returns all the names it found +func Scan() ([]string, error) { + var names []string + dirEntries, err := ioutil.ReadDir(socketsPath) + if err != nil && !os.IsNotExist(err) { + return nil, errors.Wrap(err, "error reading dir entries") + } + + for _, fi := range dirEntries { + if fi.IsDir() { + fi, err = os.Stat(filepath.Join(socketsPath, fi.Name(), fi.Name()+".sock")) + if err != nil { + continue + } + } + + if fi.Mode()&os.ModeSocket != 0 { + names = append(names, strings.TrimSuffix(filepath.Base(fi.Name()), filepath.Ext(fi.Name()))) + } + } + + for _, p := range specsPaths { + dirEntries, err := ioutil.ReadDir(p) + if err != nil && !os.IsNotExist(err) { + return nil, errors.Wrap(err, "error reading dir entries") + } + + for _, fi := range dirEntries { + if fi.IsDir() { + infos, err := ioutil.ReadDir(filepath.Join(p, fi.Name())) + if err != nil { + continue + } + + for _, info := range infos { + if strings.TrimSuffix(info.Name(), filepath.Ext(info.Name())) == fi.Name() { + fi = info + break + } + } + } + + ext := filepath.Ext(fi.Name()) + switch ext { + case ".spec", ".json": + plugin := strings.TrimSuffix(fi.Name(), ext) + names = append(names, plugin) + default: + } + } + } + return names, nil +} + +// Plugin returns the plugin registered with the given name (or returns an error). +func (l *localRegistry) Plugin(name string) (*Plugin, error) { + socketpaths := pluginPaths(socketsPath, name, ".sock") + + for _, p := range socketpaths { + if fi, err := os.Stat(p); err == nil && fi.Mode()&os.ModeSocket != 0 { + return NewLocalPlugin(name, "unix://"+p), nil + } + } + + var txtspecpaths []string + for _, p := range specsPaths { + txtspecpaths = append(txtspecpaths, pluginPaths(p, name, ".spec")...) + txtspecpaths = append(txtspecpaths, pluginPaths(p, name, ".json")...) 
+ } + + for _, p := range txtspecpaths { + if _, err := os.Stat(p); err == nil { + if strings.HasSuffix(p, ".json") { + return readPluginJSONInfo(name, p) + } + return readPluginInfo(name, p) + } + } + return nil, errors.Wrapf(ErrNotFound, "could not find plugin %s in v1 plugin registry", name) +} + +func readPluginInfo(name, path string) (*Plugin, error) { + content, err := ioutil.ReadFile(path) + if err != nil { + return nil, err + } + addr := strings.TrimSpace(string(content)) + + u, err := url.Parse(addr) + if err != nil { + return nil, err + } + + if len(u.Scheme) == 0 { + return nil, fmt.Errorf("Unknown protocol") + } + + return NewLocalPlugin(name, addr), nil +} + +func readPluginJSONInfo(name, path string) (*Plugin, error) { + f, err := os.Open(path) + if err != nil { + return nil, err + } + defer f.Close() + + var p Plugin + if err := json.NewDecoder(f).Decode(&p); err != nil { + return nil, err + } + p.name = name + if p.TLSConfig != nil && len(p.TLSConfig.CAFile) == 0 { + p.TLSConfig.InsecureSkipVerify = true + } + p.activateWait = sync.NewCond(&sync.Mutex{}) + + return &p, nil +} + +func pluginPaths(base, name, ext string) []string { + return []string{ + filepath.Join(base, name+ext), + filepath.Join(base, name, name+ext), + } +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/plugins/discovery_unix.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/plugins/discovery_unix.go new file mode 100644 index 000000000..58058f282 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/plugins/discovery_unix.go @@ -0,0 +1,5 @@ +// +build !windows + +package plugins // import "github.com/docker/docker/pkg/plugins" + +var specsPaths = []string{"/etc/docker/plugins", "/usr/lib/docker/plugins"} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/plugins/discovery_windows.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/plugins/discovery_windows.go new file mode 100644 index 000000000..f0af3477f --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/plugins/discovery_windows.go @@ -0,0 +1,8 @@ +package plugins // import "github.com/docker/docker/pkg/plugins" + +import ( + "os" + "path/filepath" +) + +var specsPaths = []string{filepath.Join(os.Getenv("programdata"), "docker", "plugins")} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/plugins/errors.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/plugins/errors.go new file mode 100644 index 000000000..6735c304b --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/plugins/errors.go @@ -0,0 +1,33 @@ +package plugins // import "github.com/docker/docker/pkg/plugins" + +import ( + "fmt" + "net/http" +) + +type statusError struct { + status int + method string + err string +} + +// Error returns a formatted string for this error type +func (e *statusError) Error() string { + return fmt.Sprintf("%s: %v", e.method, e.err) +} + +// IsNotFound indicates if the passed in error is from an http.StatusNotFound from the plugin +func IsNotFound(err error) bool { + return isStatusError(err, http.StatusNotFound) +} + +func isStatusError(err error, status int) bool { + if err == nil { + return false + } + e, ok := err.(*statusError) + if !ok { + return false + } + return e.status == status +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/plugins/plugins.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/plugins/plugins.go new file mode 100644 index 000000000..28c06ff69 --- /dev/null +++ 
b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/plugins/plugins.go @@ -0,0 +1,337 @@ +// Package plugins provides structures and helper functions to manage Docker +// plugins. +// +// Docker discovers plugins by looking for them in the plugin directory whenever +// a user or container tries to use one by name. UNIX domain socket files must +// be located under /run/docker/plugins, whereas spec files can be located +// either under /etc/docker/plugins or /usr/lib/docker/plugins. This is handled +// by the Registry interface, which lets you list all plugins or get a plugin by +// its name if it exists. +// +// The plugins need to implement an HTTP server and bind this to the UNIX socket +// or the address specified in the spec files. +// A handshake is sent at /Plugin.Activate, and plugins are expected to return +// a Manifest with a list of Docker subsystems which this plugin implements. +// +// In order to use a plugin, you can use ``Get`` with the name of the +// plugin and the subsystem it implements. +// +// plugin, err := plugins.Get("example", "VolumeDriver") +// if err != nil { +// return fmt.Errorf("Error looking up volume plugin example: %v", err) +// } +package plugins // import "github.com/docker/docker/pkg/plugins" + +import ( + "errors" + "sync" + "time" + + "github.com/docker/go-connections/tlsconfig" + "github.com/sirupsen/logrus" +) + +// ProtocolSchemeHTTPV1 is the name of the protocol used for interacting with plugins using this package. +const ProtocolSchemeHTTPV1 = "moby.plugins.http/v1" + +var ( + // ErrNotImplements is returned if the plugin does not implement the requested driver. + ErrNotImplements = errors.New("Plugin does not implement the requested driver") +) + +type plugins struct { + sync.Mutex + plugins map[string]*Plugin +} + +type extpointHandlers struct { + sync.RWMutex + extpointHandlers map[string][]func(string, *Client) +} + +var ( + storage = plugins{plugins: make(map[string]*Plugin)} + handlers = extpointHandlers{extpointHandlers: make(map[string][]func(string, *Client))} +) + +// Manifest lists what a plugin implements. +type Manifest struct { + // List of subsystems the plugin implements. + Implements []string +} + +// Plugin is the definition of a docker plugin. +type Plugin struct { + // Name of the plugin + name string + // Address of the plugin + Addr string + // TLS configuration of the plugin + TLSConfig *tlsconfig.Options + // Client attached to the plugin + client *Client + // Manifest of the plugin (see above) + Manifest *Manifest `json:"-"` + + // wait for activation to finish + activateWait *sync.Cond + // error produced by activation + activateErr error + // keeps track of callback handlers run against this plugin + handlersRun bool +} + +// Name returns the name of the plugin. +func (p *Plugin) Name() string { + return p.name +} + +// Client returns a ready-to-use plugin client that can be used to communicate with the plugin. +func (p *Plugin) Client() *Client { + return p.client +} + +// Protocol returns the protocol name/version used for plugins in this package. +func (p *Plugin) Protocol() string { + return ProtocolSchemeHTTPV1 +} + +// IsV1 returns true for V1 plugins and false otherwise. +func (p *Plugin) IsV1() bool { + return true +} + +// NewLocalPlugin creates a new local plugin.
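+// For example (editor's sketch; the socket path is hypothetical but follows
+// the /run/docker/plugins convention used by the discovery code):
+//
+//	p := NewLocalPlugin("example", "unix:///run/docker/plugins/example.sock")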
+func NewLocalPlugin(name, addr string) *Plugin { + return &Plugin{ + name: name, + Addr: addr, + // TODO: change to nil + TLSConfig: &tlsconfig.Options{InsecureSkipVerify: true}, + activateWait: sync.NewCond(&sync.Mutex{}), + } +} + +func (p *Plugin) activate() error { + p.activateWait.L.Lock() + + if p.activated() { + p.runHandlers() + p.activateWait.L.Unlock() + return p.activateErr + } + + p.activateErr = p.activateWithLock() + + p.runHandlers() + p.activateWait.L.Unlock() + p.activateWait.Broadcast() + return p.activateErr +} + +// runHandlers runs the registered handlers for the implemented plugin types +// This should only be run after activation, and while the activation lock is held. +func (p *Plugin) runHandlers() { + if !p.activated() { + return + } + + handlers.RLock() + if !p.handlersRun { + for _, iface := range p.Manifest.Implements { + hdlrs, handled := handlers.extpointHandlers[iface] + if !handled { + continue + } + for _, handler := range hdlrs { + handler(p.name, p.client) + } + } + p.handlersRun = true + } + handlers.RUnlock() + +} + +// activated returns if the plugin has already been activated. +// This should only be called with the activation lock held +func (p *Plugin) activated() bool { + return p.Manifest != nil +} + +func (p *Plugin) activateWithLock() error { + c, err := NewClient(p.Addr, p.TLSConfig) + if err != nil { + return err + } + p.client = c + + m := new(Manifest) + if err = p.client.Call("Plugin.Activate", nil, m); err != nil { + return err + } + + p.Manifest = m + return nil +} + +func (p *Plugin) waitActive() error { + p.activateWait.L.Lock() + for !p.activated() && p.activateErr == nil { + p.activateWait.Wait() + } + p.activateWait.L.Unlock() + return p.activateErr +} + +func (p *Plugin) implements(kind string) bool { + if p.Manifest == nil { + return false + } + for _, driver := range p.Manifest.Implements { + if driver == kind { + return true + } + } + return false +} + +func load(name string) (*Plugin, error) { + return loadWithRetry(name, true) +} + +func loadWithRetry(name string, retry bool) (*Plugin, error) { + registry := newLocalRegistry() + start := time.Now() + + var retries int + for { + pl, err := registry.Plugin(name) + if err != nil { + if !retry { + return nil, err + } + + timeOff := backoff(retries) + if abort(start, timeOff) { + return nil, err + } + retries++ + logrus.Warnf("Unable to locate plugin: %s, retrying in %v", name, timeOff) + time.Sleep(timeOff) + continue + } + + storage.Lock() + if pl, exists := storage.plugins[name]; exists { + storage.Unlock() + return pl, pl.activate() + } + storage.plugins[name] = pl + storage.Unlock() + + err = pl.activate() + + if err != nil { + storage.Lock() + delete(storage.plugins, name) + storage.Unlock() + } + + return pl, err + } +} + +func get(name string) (*Plugin, error) { + storage.Lock() + pl, ok := storage.plugins[name] + storage.Unlock() + if ok { + return pl, pl.activate() + } + return load(name) +} + +// Get returns the plugin given the specified name and requested implementation. +func Get(name, imp string) (*Plugin, error) { + pl, err := get(name) + if err != nil { + return nil, err + } + if err := pl.waitActive(); err == nil && pl.implements(imp) { + logrus.Debugf("%s implements: %s", name, imp) + return pl, nil + } + return nil, ErrNotImplements +} + +// Handle adds the specified function to the extpointHandlers. 
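+// Callers register one callback per subsystem, e.g. (editor's sketch; the
+// callback body is hypothetical):
+//
+//	plugins.Handle("VolumeDriver", func(name string, c *plugins.Client) {
+//		// wire the freshly activated plugin into a driver registry
+//	})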
+func Handle(iface string, fn func(string, *Client)) { + handlers.Lock() + hdlrs, ok := handlers.extpointHandlers[iface] + if !ok { + hdlrs = []func(string, *Client){} + } + + hdlrs = append(hdlrs, fn) + handlers.extpointHandlers[iface] = hdlrs + + storage.Lock() + for _, p := range storage.plugins { + p.activateWait.L.Lock() + if p.activated() && p.implements(iface) { + p.handlersRun = false + } + p.activateWait.L.Unlock() + } + storage.Unlock() + + handlers.Unlock() +} + +// GetAll returns all the plugins for the specified implementation +func GetAll(imp string) ([]*Plugin, error) { + pluginNames, err := Scan() + if err != nil { + return nil, err + } + + type plLoad struct { + pl *Plugin + err error + } + + chPl := make(chan *plLoad, len(pluginNames)) + var wg sync.WaitGroup + for _, name := range pluginNames { + storage.Lock() + pl, ok := storage.plugins[name] + storage.Unlock() + if ok { + chPl <- &plLoad{pl, nil} + continue + } + + wg.Add(1) + go func(name string) { + defer wg.Done() + pl, err := loadWithRetry(name, false) + chPl <- &plLoad{pl, err} + }(name) + } + + wg.Wait() + close(chPl) + + var out []*Plugin + for pl := range chPl { + if pl.err != nil { + logrus.Error(pl.err) + continue + } + if err := pl.pl.waitActive(); err == nil && pl.pl.implements(imp) { + out = append(out, pl.pl) + } + } + return out, nil +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/plugins/plugins_unix.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/plugins/plugins_unix.go new file mode 100644 index 000000000..cdfbe9345 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/plugins/plugins_unix.go @@ -0,0 +1,9 @@ +// +build !windows + +package plugins // import "github.com/docker/docker/pkg/plugins" + +// ScopedPath returns the path scoped to the plugin's rootfs. +// For v1 plugins, this always returns the path unchanged as v1 plugins run directly on the host. +func (p *Plugin) ScopedPath(s string) string { + return s +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/plugins/plugins_windows.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/plugins/plugins_windows.go new file mode 100644 index 000000000..ddf1d786c --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/plugins/plugins_windows.go @@ -0,0 +1,7 @@ +package plugins // import "github.com/docker/docker/pkg/plugins" + +// ScopedPath returns the path scoped to the plugin's rootfs. +// For v1 plugins, this always returns the path unchanged as v1 plugins run directly on the host. +func (p *Plugin) ScopedPath(s string) string { + return s +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/plugins/transport/http.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/plugins/transport/http.go new file mode 100644 index 000000000..76d3bdb71 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/plugins/transport/http.go @@ -0,0 +1,36 @@ +package transport // import "github.com/docker/docker/pkg/plugins/transport" + +import ( + "io" + "net/http" +) + +// httpTransport holds an http.RoundTripper +// and information about the scheme and address the transport +// sends request to. +type httpTransport struct { + http.RoundTripper + scheme string + addr string +} + +// NewHTTPTransport creates a new httpTransport. 
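+// A minimal construction (editor's sketch; the socket address is
+// hypothetical):
+//
+//	t := transport.NewHTTPTransport(&http.Transport{}, "http", "/run/docker/plugins/example.sock")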
+func NewHTTPTransport(r http.RoundTripper, scheme, addr string) Transport { + return httpTransport{ + RoundTripper: r, + scheme: scheme, + addr: addr, + } +} + +// NewRequest creates a new http.Request and sets the URL +// scheme and address with the transport's fields. +func (t httpTransport) NewRequest(path string, data io.Reader) (*http.Request, error) { + req, err := newHTTPRequest(path, data) + if err != nil { + return nil, err + } + req.URL.Scheme = t.scheme + req.URL.Host = t.addr + return req, nil +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/plugins/transport/transport.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/plugins/transport/transport.go new file mode 100644 index 000000000..9cb13335a --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/plugins/transport/transport.go @@ -0,0 +1,36 @@ +package transport // import "github.com/docker/docker/pkg/plugins/transport" + +import ( + "io" + "net/http" + "strings" +) + +// VersionMimetype is the Content-Type the engine sends to plugins. +const VersionMimetype = "application/vnd.docker.plugins.v1.2+json" + +// RequestFactory defines an interface that +// transports can implement to create new requests. +type RequestFactory interface { + NewRequest(path string, data io.Reader) (*http.Request, error) +} + +// Transport defines an interface that plugin transports +// must implement. +type Transport interface { + http.RoundTripper + RequestFactory +} + +// newHTTPRequest creates a new request with a path and a body. +func newHTTPRequest(path string, data io.Reader) (*http.Request, error) { + if !strings.HasPrefix(path, "/") { + path = "/" + path + } + req, err := http.NewRequest("POST", path, data) + if err != nil { + return nil, err + } + req.Header.Add("Accept", VersionMimetype) + return req, nil +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/pools/pools.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/pools/pools.go new file mode 100644 index 000000000..46339c282 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/pools/pools.go @@ -0,0 +1,137 @@ +// Package pools provides a collection of pools which provide various +// data types with buffers. These can be used to lower the number of +// memory allocations and reuse buffers. +// +// New pools should be added to this package to allow them to be +// shared across packages. +// +// Utility functions which operate on pools should be added to this +// package to allow them to be reused. +package pools // import "github.com/docker/docker/pkg/pools" + +import ( + "bufio" + "io" + "sync" + + "github.com/docker/docker/pkg/ioutils" +) + +const buffer32K = 32 * 1024 + +var ( + // BufioReader32KPool is a pool which returns bufio.Reader with a 32K buffer. + BufioReader32KPool = newBufioReaderPoolWithSize(buffer32K) + // BufioWriter32KPool is a pool which returns bufio.Writer with a 32K buffer. + BufioWriter32KPool = newBufioWriterPoolWithSize(buffer32K) + buffer32KPool = newBufferPoolWithSize(buffer32K) +) + +// BufioReaderPool is a bufio reader that uses sync.Pool. +type BufioReaderPool struct { + pool sync.Pool +} + +// newBufioReaderPoolWithSize is unexported because new pools should be +// added here to be shared where required. +func newBufioReaderPoolWithSize(size int) *BufioReaderPool { + return &BufioReaderPool{ + pool: sync.Pool{ + New: func() interface{} { return bufio.NewReaderSize(nil, size) }, + }, + } +} + +// Get returns a bufio.Reader which reads from r. 
The buffer size is that of the pool. +func (bufPool *BufioReaderPool) Get(r io.Reader) *bufio.Reader { + buf := bufPool.pool.Get().(*bufio.Reader) + buf.Reset(r) + return buf +} + +// Put puts the bufio.Reader back into the pool. +func (bufPool *BufioReaderPool) Put(b *bufio.Reader) { + b.Reset(nil) + bufPool.pool.Put(b) +} + +type bufferPool struct { + pool sync.Pool +} + +func newBufferPoolWithSize(size int) *bufferPool { + return &bufferPool{ + pool: sync.Pool{ + New: func() interface{} { return make([]byte, size) }, + }, + } +} + +func (bp *bufferPool) Get() []byte { + return bp.pool.Get().([]byte) +} + +func (bp *bufferPool) Put(b []byte) { + bp.pool.Put(b) +} + +// Copy is a convenience wrapper which uses a buffer to avoid allocation in io.Copy. +func Copy(dst io.Writer, src io.Reader) (written int64, err error) { + buf := buffer32KPool.Get() + written, err = io.CopyBuffer(dst, src, buf) + buffer32KPool.Put(buf) + return +} + +// NewReadCloserWrapper returns a wrapper which puts the bufio.Reader back +// into the pool and closes the reader if it's an io.ReadCloser. +func (bufPool *BufioReaderPool) NewReadCloserWrapper(buf *bufio.Reader, r io.Reader) io.ReadCloser { + return ioutils.NewReadCloserWrapper(r, func() error { + if readCloser, ok := r.(io.ReadCloser); ok { + readCloser.Close() + } + bufPool.Put(buf) + return nil + }) +} + +// BufioWriterPool is a bufio writer that uses sync.Pool. +type BufioWriterPool struct { + pool sync.Pool +} + +// newBufioWriterPoolWithSize is unexported because new pools should be +// added here to be shared where required. +func newBufioWriterPoolWithSize(size int) *BufioWriterPool { + return &BufioWriterPool{ + pool: sync.Pool{ + New: func() interface{} { return bufio.NewWriterSize(nil, size) }, + }, + } +} + +// Get returns a bufio.Writer which writes to w. The buffer size is that of the pool. +func (bufPool *BufioWriterPool) Get(w io.Writer) *bufio.Writer { + buf := bufPool.pool.Get().(*bufio.Writer) + buf.Reset(w) + return buf +} + +// Put puts the bufio.Writer back into the pool. +func (bufPool *BufioWriterPool) Put(b *bufio.Writer) { + b.Reset(nil) + bufPool.pool.Put(b) +} + +// NewWriteCloserWrapper returns a wrapper which puts the bufio.Writer back +// into the pool and closes the writer if it's an io.WriteCloser. +func (bufPool *BufioWriterPool) NewWriteCloserWrapper(buf *bufio.Writer, w io.Writer) io.WriteCloser { + return ioutils.NewWriteCloserWrapper(w, func() error { + buf.Flush() + if writeCloser, ok := w.(io.WriteCloser); ok { + writeCloser.Close() + } + bufPool.Put(buf) + return nil + }) +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/progress/progress.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/progress/progress.go new file mode 100644 index 000000000..32300914e --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/progress/progress.go @@ -0,0 +1,93 @@ +package progress // import "github.com/docker/docker/pkg/progress" + +import ( + "fmt" +) + +// Progress represents the progress of a transfer. +type Progress struct { + ID string + + // Progress contains a Message or... + Message string + + // ...progress of an action + Action string + Current int64 + Total int64 + + // If true, don't show xB/yB + HideCounts bool + // If not empty, use units instead of bytes for counts + Units string + + // Aux contains extra information not presented to the user, such as + // digests for push signing.
+ Aux interface{} + + LastUpdate bool +} + +// Output is an interface for writing progress information. It's +// like a writer for progress, but we don't call it Writer because +// that would be confusing next to ProgressReader (also, because it +// doesn't implement the io.Writer interface). +type Output interface { + WriteProgress(Progress) error +} + +type chanOutput chan<- Progress + +func (out chanOutput) WriteProgress(p Progress) error { + // FIXME: workaround for panic in #37735 + defer func() { + recover() + }() + out <- p + return nil +} + +// ChanOutput returns an Output that writes progress updates to the +// supplied channel. +func ChanOutput(progressChan chan<- Progress) Output { + return chanOutput(progressChan) +} + +type discardOutput struct{} + +func (discardOutput) WriteProgress(Progress) error { + return nil +} + +// DiscardOutput returns an Output that discards progress +func DiscardOutput() Output { + return discardOutput{} +} + +// Update is a convenience function to write a progress update to the channel. +func Update(out Output, id, action string) { + out.WriteProgress(Progress{ID: id, Action: action}) +} + +// Updatef is a convenience function to write a printf-formatted progress update +// to the channel. +func Updatef(out Output, id, format string, a ...interface{}) { + Update(out, id, fmt.Sprintf(format, a...)) +} + +// Message is a convenience function to write a progress message to the channel. +func Message(out Output, id, message string) { + out.WriteProgress(Progress{ID: id, Message: message}) +} + +// Messagef is a convenience function to write a printf-formatted progress +// message to the channel. +func Messagef(out Output, id, format string, a ...interface{}) { + Message(out, id, fmt.Sprintf(format, a...)) +} + +// Aux sends auxiliary information over a progress interface, which will not be +// formatted for the UI. This is used for things such as push signing. +func Aux(out Output, a interface{}) { + out.WriteProgress(Progress{Aux: a}) +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/progress/progressreader.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/progress/progressreader.go new file mode 100644 index 000000000..7ca07dc64 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/progress/progressreader.go @@ -0,0 +1,66 @@ +package progress // import "github.com/docker/docker/pkg/progress" + +import ( + "io" + "time" + + "golang.org/x/time/rate" +) + +// Reader is a Reader with progress bar. +type Reader struct { + in io.ReadCloser // Stream to read from + out Output // Where to send progress bar to + size int64 + current int64 + lastUpdate int64 + id string + action string + rateLimiter *rate.Limiter +} + +// NewProgressReader creates a new ProgressReader. 
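+// Typical wrapping of a download stream (editor's sketch; resp and out are
+// assumed to exist in the caller):
+//
+//	body := progress.NewProgressReader(resp.Body, out, resp.ContentLength, "layer-id", "Downloading")
+//	defer body.Close()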
+func NewProgressReader(in io.ReadCloser, out Output, size int64, id, action string) *Reader { + return &Reader{ + in: in, + out: out, + size: size, + id: id, + action: action, + rateLimiter: rate.NewLimiter(rate.Every(100*time.Millisecond), 1), + } +} + +func (p *Reader) Read(buf []byte) (n int, err error) { + read, err := p.in.Read(buf) + p.current += int64(read) + updateEvery := int64(1024 * 512) //512kB + if p.size > 0 { + // Update progress for every 1% read if 1% < 512kB + if increment := int64(0.01 * float64(p.size)); increment < updateEvery { + updateEvery = increment + } + } + if p.current-p.lastUpdate > updateEvery || err != nil { + p.updateProgress(err != nil && read == 0) + p.lastUpdate = p.current + } + + return read, err +} + +// Close closes the progress reader and its underlying reader. +func (p *Reader) Close() error { + if p.current < p.size { + // print a full progress bar when closing prematurely + p.current = p.size + p.updateProgress(false) + } + return p.in.Close() +} + +func (p *Reader) updateProgress(last bool) { + if last || p.current == p.size || p.rateLimiter.Allow() { + p.out.WriteProgress(Progress{ID: p.id, Action: p.action, Current: p.current, Total: p.size, LastUpdate: last}) + } +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/reexec/README.md b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/reexec/README.md new file mode 100644 index 000000000..6658f69b6 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/reexec/README.md @@ -0,0 +1,5 @@ +# reexec + +The `reexec` package facilitates the busybox style reexec of the docker binary that we require because +of the forking limitations of using Go. Handlers can be registered with a name and the argv 0 of +the exec of the binary will be used to find and execute custom init paths. diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/reexec/command_linux.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/reexec/command_linux.go new file mode 100644 index 000000000..efea71794 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/reexec/command_linux.go @@ -0,0 +1,28 @@ +package reexec // import "github.com/docker/docker/pkg/reexec" + +import ( + "os/exec" + "syscall" + + "golang.org/x/sys/unix" +) + +// Self returns the path to the current process's binary. +// Returns "/proc/self/exe". +func Self() string { + return "/proc/self/exe" +} + +// Command returns *exec.Cmd which has Path as current binary. Also it sets +// SysProcAttr.Pdeathsig to SIGTERM. +// This will use the in-memory version (/proc/self/exe) of the current binary, +// it is thus safe to delete or replace the on-disk binary (os.Args[0]). +func Command(args ...string) *exec.Cmd { + return &exec.Cmd{ + Path: Self(), + Args: args, + SysProcAttr: &syscall.SysProcAttr{ + Pdeathsig: unix.SIGTERM, + }, + } +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/reexec/command_unix.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/reexec/command_unix.go new file mode 100644 index 000000000..ceaabbdee --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/reexec/command_unix.go @@ -0,0 +1,23 @@ +// +build freebsd darwin + +package reexec // import "github.com/docker/docker/pkg/reexec" + +import ( + "os/exec" +) + +// Self returns the path to the current process's binary. +// Uses os.Args[0]. +func Self() string { + return naiveSelf() +} + +// Command returns *exec.Cmd which has Path as current binary.
+// For example if the current binary is "docker" at "/usr/bin/", then cmd.Path will +// be set to "/usr/bin/docker". +func Command(args ...string) *exec.Cmd { + return &exec.Cmd{ + Path: Self(), + Args: args, + } +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/reexec/command_unsupported.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/reexec/command_unsupported.go new file mode 100644 index 000000000..e7eed2424 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/reexec/command_unsupported.go @@ -0,0 +1,16 @@ +// +build !linux,!windows,!freebsd,!darwin + +package reexec // import "github.com/docker/docker/pkg/reexec" + +import ( + "os/exec" +) + +func Self() string { + return "" +} + +// Command is unsupported on operating systems apart from Linux, Windows, FreeBSD, and Darwin. +func Command(args ...string) *exec.Cmd { + return nil +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/reexec/command_windows.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/reexec/command_windows.go new file mode 100644 index 000000000..438226890 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/reexec/command_windows.go @@ -0,0 +1,21 @@ +package reexec // import "github.com/docker/docker/pkg/reexec" + +import ( + "os/exec" +) + +// Self returns the path to the current process's binary. +// Uses os.Args[0]. +func Self() string { + return naiveSelf() +} + +// Command returns *exec.Cmd which has Path as current binary. +// For example if the current binary is "docker.exe" at "C:\", then cmd.Path will +// be set to "C:\docker.exe". +func Command(args ...string) *exec.Cmd { + return &exec.Cmd{ + Path: Self(), + Args: args, + } +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/reexec/reexec.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/reexec/reexec.go new file mode 100644 index 000000000..f8ccddd59 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/reexec/reexec.go @@ -0,0 +1,47 @@ +package reexec // import "github.com/docker/docker/pkg/reexec" + +import ( + "fmt" + "os" + "os/exec" + "path/filepath" +) + +var registeredInitializers = make(map[string]func()) + +// Register adds an initialization func under the specified name +func Register(name string, initializer func()) { + if _, exists := registeredInitializers[name]; exists { + panic(fmt.Sprintf("reexec func already registered under name %q", name)) + } + + registeredInitializers[name] = initializer +} + +// Init is called as the first part of the exec process and returns true if an +// initialization function was called.
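+//
+// The conventional pattern (editor's sketch; "my-helper" and the child logic
+// are hypothetical) is to register handlers in an init function and
+// short-circuit main when re-executed:
+//
+//	func init() {
+//		reexec.Register("my-helper", func() {
+//			// child-process logic runs here, then the process exits
+//		})
+//	}
+//
+//	func main() {
+//		if reexec.Init() {
+//			return
+//		}
+//		// normal startup
+//	}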
+func Init() bool { + initializer, exists := registeredInitializers[os.Args[0]] + if exists { + initializer() + + return true + } + return false +} + +func naiveSelf() string { + name := os.Args[0] + if filepath.Base(name) == name { + if lp, err := exec.LookPath(name); err == nil { + return lp + } + } + // handle conversion of relative paths to absolute + if absName, err := filepath.Abs(name); err == nil { + return absName + } + // if we couldn't get absolute name, return original + // (NOTE: Go only errors on Abs() if os.Getwd fails) + return name +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/stringid/stringid.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/stringid/stringid.go index fa7d9166e..5fe071d62 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/stringid/stringid.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/stringid/stringid.go @@ -2,17 +2,12 @@ package stringid // import "github.com/docker/docker/pkg/stringid" import ( - cryptorand "crypto/rand" + "crypto/rand" "encoding/hex" "fmt" - "io" - "math" - "math/big" - "math/rand" "regexp" "strconv" "strings" - "time" ) const shortLen = 12 @@ -41,10 +36,11 @@ func TruncateID(id string) string { return id } -func generateID(r io.Reader) string { +// GenerateRandomID returns a unique id. +func GenerateRandomID() string { b := make([]byte, 32) for { - if _, err := io.ReadFull(r, b); err != nil { + if _, err := rand.Read(b); err != nil { panic(err) // This shouldn't happen } id := hex.EncodeToString(b) @@ -58,18 +54,6 @@ func generateID(r io.Reader) string { } } -// GenerateRandomID returns a unique id. -func GenerateRandomID() string { - return generateID(cryptorand.Reader) -} - -// GenerateNonCryptoID generates unique id without using cryptographically -// secure sources of random. -// It helps you to save entropy. -func GenerateNonCryptoID() string { - return generateID(readerFunc(rand.Read)) -} - // ValidateID checks whether an ID string is a valid image ID. func ValidateID(id string) error { if ok := validHex.MatchString(id); !ok { @@ -77,23 +61,3 @@ func ValidateID(id string) error { } return nil } - -func init() { - // safely set the seed globally so we generate random ids. Tries to use a - // crypto seed before falling back to time. - var seed int64 - if cryptoseed, err := cryptorand.Int(cryptorand.Reader, big.NewInt(math.MaxInt64)); err != nil { - // This should not happen, but worst-case fallback to time-based seed. - seed = time.Now().UnixNano() - } else { - seed = cryptoseed.Int64() - } - - rand.Seed(seed) -} - -type readerFunc func(p []byte) (int, error) - -func (fn readerFunc) Read(p []byte) (int, error) { - return fn(p) -} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/symlink/LICENSE.APACHE b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/symlink/LICENSE.APACHE new file mode 100644 index 000000000..5d80670bc --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/symlink/LICENSE.APACHE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2014-2018 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/symlink/LICENSE.BSD b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/symlink/LICENSE.BSD new file mode 100644 index 000000000..2ee8768d3 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/symlink/LICENSE.BSD @@ -0,0 +1,27 @@ +Copyright (c) 2014-2018 The Docker & Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/symlink/README.md b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/symlink/README.md new file mode 100644 index 000000000..8dba54fd0 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/symlink/README.md @@ -0,0 +1,6 @@ +Package symlink implements EvalSymlinksInScope which is an extension of filepath.EvalSymlinks, +as well as a Windows long-path aware version of filepath.EvalSymlinks +from the [Go standard library](https://golang.org/pkg/path/filepath). + +The code from filepath.EvalSymlinks has been adapted in fs.go. +Please read the LICENSE.BSD file that governs fs.go and LICENSE.APACHE for fs_test.go. diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/symlink/fs.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/symlink/fs.go new file mode 100644 index 000000000..7b894cde7 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/symlink/fs.go @@ -0,0 +1,144 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.BSD file. + +// This code is a modified version of path/filepath/symlink.go from the Go standard library. + +package symlink // import "github.com/docker/docker/pkg/symlink" + +import ( + "bytes" + "errors" + "os" + "path/filepath" + "strings" + + "github.com/docker/docker/pkg/system" +) + +// FollowSymlinkInScope is a wrapper around evalSymlinksInScope that returns an +// absolute path. This function handles paths in a platform-agnostic manner. +func FollowSymlinkInScope(path, root string) (string, error) { + path, err := filepath.Abs(filepath.FromSlash(path)) + if err != nil { + return "", err + } + root, err = filepath.Abs(filepath.FromSlash(root)) + if err != nil { + return "", err + } + return evalSymlinksInScope(path, root) +} + +// evalSymlinksInScope will evaluate symlinks in `path` within a scope `root` and return +// a result guaranteed to be contained within the scope `root`, at the time of the call. +// Symlinks in `root` are not evaluated and left as-is. +// Errors encountered while attempting to evaluate symlinks in path will be returned. +// Non-existing paths are valid and do not constitute an error. +// `path` has to contain `root` as a prefix, or else an error will be returned. 
+// Trying to break out from `root` does not constitute an error. +// +// Example: +// If /foo/bar -> /outside, +// FollowSymlinkInScope("/foo/bar", "/foo") == "/foo/outside" instead of "/outside" +// +// IMPORTANT: it is the caller's responsibility to call evalSymlinksInScope *after* relevant symlinks +// are created, and not to subsequently create additional symlinks that could potentially make a +// previously-safe path unsafe. Example: if /foo/bar does not exist, evalSymlinksInScope("/foo/bar", "/foo") +// would return "/foo/bar". If one makes /foo/bar a symlink to /baz subsequently, then "/foo/bar" should +// no longer be considered safely contained in "/foo". +func evalSymlinksInScope(path, root string) (string, error) { + root = filepath.Clean(root) + if path == root { + return path, nil + } + if !strings.HasPrefix(path, root) { + return "", errors.New("evalSymlinksInScope: " + path + " is not in " + root) + } + const maxIter = 255 + originalPath := path + // given root of "/a" and path of "/a/b/../../c" we want path to be "/b/../../c" + path = path[len(root):] + if root == string(filepath.Separator) { + path = string(filepath.Separator) + path + } + if !strings.HasPrefix(path, string(filepath.Separator)) { + return "", errors.New("evalSymlinksInScope: " + path + " is not in " + root) + } + path = filepath.Clean(path) + // consume path by taking each frontmost path element, + // expanding it if it's a symlink, and appending it to b + var b bytes.Buffer + // b here will always be considered to be the "current absolute path inside + // root" when we append paths to it; we also append a slash and use + // filepath.Clean after the loop to trim the trailing slash + for n := 0; path != ""; n++ { + if n > maxIter { + return "", errors.New("evalSymlinksInScope: too many links in " + originalPath) + } + + // find next path component, p + i := strings.IndexRune(path, filepath.Separator) + var p string + if i == -1 { + p, path = path, "" + } else { + p, path = path[:i], path[i+1:] + } + + if p == "" { + continue + } + + // this takes a b.String() like "b/../" and a p like "c" and turns it + // into "/b/../c" which then gets filepath.Cleaned into "/c" and then + // root gets prepended and we Clean again (to remove any trailing slash + // if the first Clean gave us just "/") + cleanP := filepath.Clean(string(filepath.Separator) + b.String() + p) + if isDriveOrRoot(cleanP) { + // never Lstat "/" itself, or drive letters on Windows + b.Reset() + continue + } + fullP := filepath.Clean(root + cleanP) + + fi, err := os.Lstat(fullP) + if os.IsNotExist(err) { + // if p does not exist, accept it + b.WriteString(p) + b.WriteRune(filepath.Separator) + continue + } + if err != nil { + return "", err + } + if fi.Mode()&os.ModeSymlink == 0 { + b.WriteString(p) + b.WriteRune(filepath.Separator) + continue + } + + // it's a symlink, put it at the front of path + dest, err := os.Readlink(fullP) + if err != nil { + return "", err + } + if system.IsAbs(dest) { + b.Reset() + } + path = dest + string(filepath.Separator) + path + } + + // see note above on "fullP := ..." for why this is double-cleaned and + // what's happening here + return filepath.Clean(root + filepath.Clean(string(filepath.Separator)+b.String())), nil +} + +// EvalSymlinks returns the path name after the evaluation of any symbolic +// links. +// If path is relative the result will be relative to the current directory, +// unless one of the components is an absolute symbolic link.
+// This version has been updated to support long paths prepended with `\\?\`. +func EvalSymlinks(path string) (string, error) { + return evalSymlinks(path) +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/symlink/fs_unix.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/symlink/fs_unix.go new file mode 100644 index 000000000..c6dafcb0b --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/symlink/fs_unix.go @@ -0,0 +1,15 @@ +// +build !windows + +package symlink // import "github.com/docker/docker/pkg/symlink" + +import ( + "path/filepath" +) + +func evalSymlinks(path string) (string, error) { + return filepath.EvalSymlinks(path) +} + +func isDriveOrRoot(p string) bool { + return p == string(filepath.Separator) +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/symlink/fs_windows.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/symlink/fs_windows.go new file mode 100644 index 000000000..754761717 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/symlink/fs_windows.go @@ -0,0 +1,169 @@ +package symlink // import "github.com/docker/docker/pkg/symlink" + +import ( + "bytes" + "errors" + "os" + "path/filepath" + "strings" + + "github.com/docker/docker/pkg/longpath" + "golang.org/x/sys/windows" +) + +func toShort(path string) (string, error) { + p, err := windows.UTF16FromString(path) + if err != nil { + return "", err + } + b := p // GetShortPathName says we can reuse buffer + n, err := windows.GetShortPathName(&p[0], &b[0], uint32(len(b))) + if err != nil { + return "", err + } + if n > uint32(len(b)) { + b = make([]uint16, n) + if _, err = windows.GetShortPathName(&p[0], &b[0], uint32(len(b))); err != nil { + return "", err + } + } + return windows.UTF16ToString(b), nil +} + +func toLong(path string) (string, error) { + p, err := windows.UTF16FromString(path) + if err != nil { + return "", err + } + b := p // GetLongPathName says we can reuse buffer + n, err := windows.GetLongPathName(&p[0], &b[0], uint32(len(b))) + if err != nil { + return "", err + } + if n > uint32(len(b)) { + b = make([]uint16, n) + n, err = windows.GetLongPathName(&p[0], &b[0], uint32(len(b))) + if err != nil { + return "", err + } + } + b = b[:n] + return windows.UTF16ToString(b), nil +} + +func evalSymlinks(path string) (string, error) { + path, err := walkSymlinks(path) + if err != nil { + return "", err + } + + p, err := toShort(path) + if err != nil { + return "", err + } + p, err = toLong(p) + if err != nil { + return "", err + } + // windows.GetLongPathName does not change the case of the drive letter, + // but the result of EvalSymlinks must be unique, so we have + // EvalSymlinks(`c:\a`) == EvalSymlinks(`C:\a`). + // Make drive letter upper case. + if len(p) >= 2 && p[1] == ':' && 'a' <= p[0] && p[0] <= 'z' { + p = string(p[0]+'A'-'a') + p[1:] + } else if len(p) >= 6 && p[5] == ':' && 'a' <= p[4] && p[4] <= 'z' { + p = p[:3] + string(p[4]+'A'-'a') + p[5:] + } + return filepath.Clean(p), nil +} + +const utf8RuneSelf = 0x80 + +func walkSymlinks(path string) (string, error) { + const maxIter = 255 + originalPath := path + // consume path by taking each frontmost path element, + // expanding it if it's a symlink, and appending it to b + var b bytes.Buffer + for n := 0; path != ""; n++ { + if n > maxIter { + return "", errors.New("EvalSymlinks: too many links in " + originalPath) + } + + // A path beginning with `\\?\` represents the root, so automatically + // skip that part and begin processing the next segment. 
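+ // (longpath.Prefix is the Windows long-path prefix `\\?\`; the path[4:] + // below skips exactly those four characters.)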
+ if strings.HasPrefix(path, longpath.Prefix) { + b.WriteString(longpath.Prefix) + path = path[4:] + continue + } + + // find next path component, p + var i = -1 + for j, c := range path { + if c < utf8RuneSelf && os.IsPathSeparator(uint8(c)) { + i = j + break + } + } + var p string + if i == -1 { + p, path = path, "" + } else { + p, path = path[:i], path[i+1:] + } + + if p == "" { + if b.Len() == 0 { + // must be absolute path + b.WriteRune(filepath.Separator) + } + continue + } + + // If this is the first segment after the long path prefix, accept the + // current segment as a volume root or UNC share and move on to the next. + if b.String() == longpath.Prefix { + b.WriteString(p) + b.WriteRune(filepath.Separator) + continue + } + + fi, err := os.Lstat(b.String() + p) + if err != nil { + return "", err + } + if fi.Mode()&os.ModeSymlink == 0 { + b.WriteString(p) + if path != "" || (b.Len() == 2 && len(p) == 2 && p[1] == ':') { + b.WriteRune(filepath.Separator) + } + continue + } + + // it's a symlink, put it at the front of path + dest, err := os.Readlink(b.String() + p) + if err != nil { + return "", err + } + if filepath.IsAbs(dest) || os.IsPathSeparator(dest[0]) { + b.Reset() + } + path = dest + string(filepath.Separator) + path + } + return filepath.Clean(b.String()), nil +} + +func isDriveOrRoot(p string) bool { + if p == string(filepath.Separator) { + return true + } + + length := len(p) + if length >= 2 { + if p[length-1] == ':' && (('a' <= p[length-2] && p[length-2] <= 'z') || ('A' <= p[length-2] && p[length-2] <= 'Z')) { + return true + } + } + return false +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/system/args_windows.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/system/args_windows.go new file mode 100644 index 000000000..b7c9487a0 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/system/args_windows.go @@ -0,0 +1,16 @@ +package system // import "github.com/docker/docker/pkg/system" + +import ( + "strings" + + "golang.org/x/sys/windows" +) + +// EscapeArgs makes a Windows-style escaped command line from a set of arguments +func EscapeArgs(args []string) string { + escapedArgs := make([]string, len(args)) + for i, a := range args { + escapedArgs[i] = windows.EscapeArg(a) + } + return strings.Join(escapedArgs, " ") +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/system/filesys_windows.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/system/filesys_windows.go index a1f6013f1..3049ff38a 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/system/filesys_windows.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/system/filesys_windows.go @@ -18,8 +18,6 @@ import ( const ( // SddlAdministratorsLocalSystem is local administrators plus NT AUTHORITY\System SddlAdministratorsLocalSystem = "D:P(A;OICI;GA;;;BA)(A;OICI;GA;;;SY)" - // SddlNtvmAdministratorsLocalSystem is NT VIRTUAL MACHINE\Virtual Machines plus local administrators plus NT AUTHORITY\System - SddlNtvmAdministratorsLocalSystem = "D:P(A;OICI;GA;;;S-1-5-83-0)(A;OICI;GA;;;BA)(A;OICI;GA;;;SY)" ) // MkdirAllWithACL is a wrapper for MkdirAll that creates a directory diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/system/init_unix.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/system/init_unix.go index 4996a67c1..c2bb0f4cc 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/system/init_unix.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/system/init_unix.go @@ -5,3 
+5,8 @@ package system // import "github.com/docker/docker/pkg/system" // InitLCOW does nothing since LCOW is a windows only feature func InitLCOW(experimental bool) { } + +// ContainerdRuntimeSupported returns true if the use of the ContainerD runtime is supported. +func ContainerdRuntimeSupported(_ bool, _ string) bool { + return true +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/system/init_windows.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/system/init_windows.go index 4910ff69d..f303aa906 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/system/init_windows.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/system/init_windows.go @@ -1,12 +1,41 @@ package system // import "github.com/docker/docker/pkg/system" -// lcowSupported determines if Linux Containers on Windows are supported. -var lcowSupported = false +import ( + "os" -// InitLCOW sets whether LCOW is supported or not + "github.com/Microsoft/hcsshim/osversion" + "github.com/sirupsen/logrus" +) + +var ( + // lcowSupported determines if Linux Containers on Windows are supported. + lcowSupported = false + + // containerdRuntimeSupported determines if ContainerD should be the runtime. + // As of March 2019, this is an experimental feature. + containerdRuntimeSupported = false +) + +// InitLCOW sets whether LCOW is supported or not. Requires RS5+. func InitLCOW(experimental bool) { v := GetOSVersion() - if experimental && v.Build >= 16299 { + if experimental && v.Build >= osversion.RS5 { lcowSupported = true } } + +// InitContainerdRuntime sets whether to use ContainerD for the runtime +// on Windows. This is an experimental feature still in development, and it +// also requires an environment variable to be set (so that simply enabling +// experimental mode, which would also enable LCOW, does not turn this +// feature on). +func InitContainerdRuntime(experimental bool, cdPath string) { + if experimental && len(cdPath) > 0 && len(os.Getenv("DOCKER_WINDOWS_CONTAINERD_RUNTIME")) > 0 { + logrus.Warnf("Using ContainerD runtime. This feature is experimental") + containerdRuntimeSupported = true + } +} + +// ContainerdRuntimeSupported returns true if the use of the ContainerD runtime is supported. +func ContainerdRuntimeSupported() bool { + return containerdRuntimeSupported +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/system/lstat_unix.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/system/lstat_unix.go index 7477995f1..de5a1c0fb 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/system/lstat_unix.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/system/lstat_unix.go @@ -3,6 +3,7 @@ package system // import "github.com/docker/docker/pkg/system" import ( + "os" "syscall" ) @@ -13,7 +14,7 @@ import ( func Lstat(path string) (*StatT, error) { s := &syscall.Stat_t{} if err := syscall.Lstat(path, s); err != nil { - return nil, err + return nil, &os.PathError{Op: "Lstat", Path: path, Err: err} } return fromStatT(s) } diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/system/path_unix.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/system/path_unix.go new file mode 100644 index 000000000..b0b93196a --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/system/path_unix.go @@ -0,0 +1,10 @@ +// +build !windows + +package system // import "github.com/docker/docker/pkg/system" + +// GetLongPathName converts Windows short pathnames to full pathnames. +// For example, C:\Users\ADMIN~1 --> C:\Users\Administrator.
+// It is a no-op on non-Windows platforms +func GetLongPathName(path string) (string, error) { + return path, nil +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/system/path_windows.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/system/path_windows.go new file mode 100644 index 000000000..188f2c295 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/system/path_windows.go @@ -0,0 +1,24 @@ +package system // import "github.com/docker/docker/pkg/system" + +import "syscall" + +// GetLongPathName converts Windows short pathnames to full pathnames. +// For example C:\Users\ADMIN~1 --> C:\Users\Administrator. +// It is a no-op on non-Windows platforms +func GetLongPathName(path string) (string, error) { + // See https://groups.google.com/forum/#!topic/golang-dev/1tufzkruoTg + p := syscall.StringToUTF16(path) + b := p // GetLongPathName says we can reuse buffer + n, err := syscall.GetLongPathName(&p[0], &b[0], uint32(len(b))) + if err != nil { + return "", err + } + if n > uint32(len(b)) { + b = make([]uint16, n) + _, err = syscall.GetLongPathName(&p[0], &b[0], uint32(len(b))) + if err != nil { + return "", err + } + } + return syscall.UTF16ToString(b), nil +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/system/rm.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/system/rm.go index 02e4d2622..b31099180 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/system/rm.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/system/rm.go @@ -34,7 +34,7 @@ func EnsureRemoveAll(dir string) error { for { err := os.RemoveAll(dir) if err == nil { - return err + return nil } pe, ok := err.(*os.PathError) diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/system/stat_linux.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/system/stat_linux.go index 98c9eb18d..17d5d131a 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/system/stat_linux.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/system/stat_linux.go @@ -8,7 +8,8 @@ func fromStatT(s *syscall.Stat_t) (*StatT, error) { mode: s.Mode, uid: s.Uid, gid: s.Gid, - rdev: s.Rdev, + // the type is 32bit on mips + rdev: uint64(s.Rdev), // nolint: unconvert mtim: s.Mtim}, nil } diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/system/stat_unix.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/system/stat_unix.go index 3d7e2ebbe..86bb6dd55 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/system/stat_unix.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/system/stat_unix.go @@ -3,6 +3,7 @@ package system // import "github.com/docker/docker/pkg/system" import ( + "os" "syscall" ) @@ -59,7 +60,7 @@ func (s StatT) IsDir() bool { func Stat(path string) (*StatT, error) { s := &syscall.Stat_t{} if err := syscall.Stat(path, s); err != nil { - return nil, err + return nil, &os.PathError{Op: "Stat", Path: path, Err: err} } return fromStatT(s) } diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/system/syscall_windows.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/system/syscall_windows.go index ee7e0256f..4ae92fa6c 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/system/syscall_windows.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/system/syscall_windows.go @@ -2,16 +2,62 @@ package system // import "github.com/docker/docker/pkg/system" import ( "fmt" + "syscall" "unsafe" "github.com/sirupsen/logrus" 
"golang.org/x/sys/windows" ) +const ( + OWNER_SECURITY_INFORMATION = 0x00000001 + GROUP_SECURITY_INFORMATION = 0x00000002 + DACL_SECURITY_INFORMATION = 0x00000004 + SACL_SECURITY_INFORMATION = 0x00000008 + LABEL_SECURITY_INFORMATION = 0x00000010 + ATTRIBUTE_SECURITY_INFORMATION = 0x00000020 + SCOPE_SECURITY_INFORMATION = 0x00000040 + PROCESS_TRUST_LABEL_SECURITY_INFORMATION = 0x00000080 + ACCESS_FILTER_SECURITY_INFORMATION = 0x00000100 + BACKUP_SECURITY_INFORMATION = 0x00010000 + PROTECTED_DACL_SECURITY_INFORMATION = 0x80000000 + PROTECTED_SACL_SECURITY_INFORMATION = 0x40000000 + UNPROTECTED_DACL_SECURITY_INFORMATION = 0x20000000 + UNPROTECTED_SACL_SECURITY_INFORMATION = 0x10000000 +) + +const ( + SE_UNKNOWN_OBJECT_TYPE = iota + SE_FILE_OBJECT + SE_SERVICE + SE_PRINTER + SE_REGISTRY_KEY + SE_LMSHARE + SE_KERNEL_OBJECT + SE_WINDOW_OBJECT + SE_DS_OBJECT + SE_DS_OBJECT_ALL + SE_PROVIDER_DEFINED_OBJECT + SE_WMIGUID_OBJECT + SE_REGISTRY_WOW64_32KEY +) + +const ( + SeTakeOwnershipPrivilege = "SeTakeOwnershipPrivilege" +) + +const ( + ContainerAdministratorSidString = "S-1-5-93-2-1" + ContainerUserSidString = "S-1-5-93-2-2" +) + var ( - ntuserApiset = windows.NewLazyDLL("ext-ms-win-ntuser-window-l1-1-0") - procGetVersionExW = modkernel32.NewProc("GetVersionExW") - procGetProductInfo = modkernel32.NewProc("GetProductInfo") + ntuserApiset = windows.NewLazyDLL("ext-ms-win-ntuser-window-l1-1-0") + modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") + procGetVersionExW = modkernel32.NewProc("GetVersionExW") + procGetProductInfo = modkernel32.NewProc("GetProductInfo") + procSetNamedSecurityInfo = modadvapi32.NewProc("SetNamedSecurityInfoW") + procGetSecurityDescriptorDacl = modadvapi32.NewProc("GetSecurityDescriptorDacl") ) // OSVersion is a wrapper for Windows version information @@ -125,3 +171,23 @@ func HasWin32KSupport() bool { // APIs. return ntuserApiset.Load() == nil } + +func SetNamedSecurityInfo(objectName *uint16, objectType uint32, securityInformation uint32, sidOwner *windows.SID, sidGroup *windows.SID, dacl *byte, sacl *byte) (result error) { + r0, _, _ := syscall.Syscall9(procSetNamedSecurityInfo.Addr(), 7, uintptr(unsafe.Pointer(objectName)), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(sidOwner)), uintptr(unsafe.Pointer(sidGroup)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), 0, 0) + if r0 != 0 { + result = syscall.Errno(r0) + } + return +} + +func GetSecurityDescriptorDacl(securityDescriptor *byte, daclPresent *uint32, dacl **byte, daclDefaulted *uint32) (result error) { + r1, _, e1 := syscall.Syscall6(procGetSecurityDescriptorDacl.Addr(), 4, uintptr(unsafe.Pointer(securityDescriptor)), uintptr(unsafe.Pointer(daclPresent)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(daclDefaulted)), 0, 0) + if r1 == 0 { + if e1 != 0 { + result = syscall.Errno(e1) + } else { + result = syscall.EINVAL + } + } + return +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/term/term_windows.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/term/term_windows.go index 64ead3c53..a3c3db131 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/term/term_windows.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/term/term_windows.go @@ -62,13 +62,6 @@ func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { } } - if os.Getenv("ConEmuANSI") == "ON" || os.Getenv("ConsoleZVersion") != "" { - // The ConEmu and ConsoleZ terminals emulate ANSI on output streams well. 
- emulateStdin = true - emulateStdout = false - emulateStderr = false - } - - // Temporarily use STD_INPUT_HANDLE, STD_OUTPUT_HANDLE and // STD_ERROR_HANDLE from syscall rather than x/sys/windows as long as // go-ansiterm hasn't switch to x/sys/windows. diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/useragent/README.md b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/useragent/README.md new file mode 100644 index 000000000..d9cb367d1 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/useragent/README.md @@ -0,0 +1 @@ +This package provides helper functions to pack version information into a single User-Agent header. diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/useragent/useragent.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/useragent/useragent.go new file mode 100644 index 000000000..22db82129 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/pkg/useragent/useragent.go @@ -0,0 +1,55 @@ +// Package useragent provides helper functions to pack +// version information into a single User-Agent header. +package useragent // import "github.com/docker/docker/pkg/useragent" + +import ( + "strings" +) + +// VersionInfo is used to model UserAgent versions. +type VersionInfo struct { + Name string + Version string +} + +func (vi *VersionInfo) isValid() bool { + const stopChars = " \t\r\n/" + name := vi.Name + vers := vi.Version + if len(name) == 0 || strings.ContainsAny(name, stopChars) { + return false + } + if len(vers) == 0 || strings.ContainsAny(vers, stopChars) { + return false + } + return true +} + +// AppendVersions converts versions to a string and appends the string to the +// base string. +// +// Each VersionInfo will be converted to a string in the format of +// "product/version", where "product" is taken from the Name field and +// "version" is taken from the Version field. Several pieces of version +// information will be concatenated and separated by spaces. +// +// Example: +// AppendVersions("base", VersionInfo{"foo", "1.0"}, VersionInfo{"bar", "2.0"}) +// results in "base foo/1.0 bar/2.0". +func AppendVersions(base string, versions ...VersionInfo) string { + if len(versions) == 0 { + return base + } + + verstrs := make([]string, 0, 1+len(versions)) + if len(base) > 0 { + verstrs = append(verstrs, base) + } + + for _, v := range versions { + if !v.isValid() { + continue + } + verstrs = append(verstrs, v.Name+"/"+v.Version) + } + return strings.Join(verstrs, " ") +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/plugin/v2/plugin.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/plugin/v2/plugin.go new file mode 100644 index 000000000..6852511c5 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/plugin/v2/plugin.go @@ -0,0 +1,311 @@ +package v2 // import "github.com/docker/docker/plugin/v2" + +import ( + "fmt" + "net" + "path/filepath" + "strings" + "sync" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/docker/pkg/plugins" + "github.com/opencontainers/go-digest" + "github.com/opencontainers/runtime-spec/specs-go" +) + +// Plugin represents an individual plugin.
+type Plugin struct { + mu sync.RWMutex + PluginObj types.Plugin `json:"plugin"` // todo: embed struct + pClient *plugins.Client + refCount int + Rootfs string // TODO: make private + + Config digest.Digest + Blobsums []digest.Digest + + modifyRuntimeSpec func(*specs.Spec) + + SwarmServiceID string + timeout time.Duration + addr net.Addr +} + +const defaultPluginRuntimeDestination = "/run/docker/plugins" + +// ErrInadequateCapability indicates that the plugin did not have the requested capability. +type ErrInadequateCapability struct { + cap string +} + +func (e ErrInadequateCapability) Error() string { + return fmt.Sprintf("plugin does not provide %q capability", e.cap) +} + +// ScopedPath returns the path scoped to the plugin rootfs +func (p *Plugin) ScopedPath(s string) string { + if p.PluginObj.Config.PropagatedMount != "" && strings.HasPrefix(s, p.PluginObj.Config.PropagatedMount) { + // re-scope to the propagated mount path on the host + return filepath.Join(filepath.Dir(p.Rootfs), "propagated-mount", strings.TrimPrefix(s, p.PluginObj.Config.PropagatedMount)) + } + return filepath.Join(p.Rootfs, s) +} + +// Client returns the plugin client. +// Deprecated: use p.Addr() and manually create the client +func (p *Plugin) Client() *plugins.Client { + p.mu.RLock() + defer p.mu.RUnlock() + + return p.pClient +} + +// SetPClient sets the plugin client. +// Deprecated: Hardcoded plugin client is deprecated +func (p *Plugin) SetPClient(client *plugins.Client) { + p.mu.Lock() + defer p.mu.Unlock() + + p.pClient = client +} + +// IsV1 returns true for V1 plugins and false otherwise. +func (p *Plugin) IsV1() bool { + return false +} + +// Name returns the plugin name. +func (p *Plugin) Name() string { + return p.PluginObj.Name +} + +// FilterByCap queries the plugin for a given capability. +func (p *Plugin) FilterByCap(capability string) (*Plugin, error) { + capability = strings.ToLower(capability) + for _, typ := range p.PluginObj.Config.Interface.Types { + if typ.Capability == capability && typ.Prefix == "docker" { + return p, nil + } + } + return nil, ErrInadequateCapability{capability} +} + +// InitEmptySettings initializes empty settings for a plugin. +func (p *Plugin) InitEmptySettings() { + p.PluginObj.Settings.Mounts = make([]types.PluginMount, len(p.PluginObj.Config.Mounts)) + copy(p.PluginObj.Settings.Mounts, p.PluginObj.Config.Mounts) + p.PluginObj.Settings.Devices = make([]types.PluginDevice, len(p.PluginObj.Config.Linux.Devices)) + copy(p.PluginObj.Settings.Devices, p.PluginObj.Config.Linux.Devices) + p.PluginObj.Settings.Env = make([]string, 0, len(p.PluginObj.Config.Env)) + for _, env := range p.PluginObj.Config.Env { + if env.Value != nil { + p.PluginObj.Settings.Env = append(p.PluginObj.Settings.Env, fmt.Sprintf("%s=%s", env.Name, *env.Value)) + } + } + p.PluginObj.Settings.Args = make([]string, len(p.PluginObj.Config.Args.Value)) + copy(p.PluginObj.Settings.Args, p.PluginObj.Config.Args.Value) +} + +// Set is used to pass arguments to the plugin. +func (p *Plugin) Set(args []string) error { + p.mu.Lock() + defer p.mu.Unlock() + + if p.PluginObj.Enabled { + return fmt.Errorf("cannot set on an active plugin, disable plugin before setting") + } + + sets, err := newSettables(args) + if err != nil { + return err + } + + // TODO(vieux): lots of code duplication here, needs to be refactored.
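+ // The `next` label lets the loop jump straight to the following settable + // as soon as a matching env, mount, device or args entry has been updated.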
+ +next: + for _, s := range sets { + // range over all the envs in the config + for _, env := range p.PluginObj.Config.Env { + // found the env in the config + if env.Name == s.name { + // is it settable? + if ok, err := s.isSettable(allowedSettableFieldsEnv, env.Settable); err != nil { + return err + } else if !ok { + return fmt.Errorf("%q is not settable", s.prettyName()) + } + // it is, so let's update the settings in memory + updateSettingsEnv(&p.PluginObj.Settings.Env, &s) + continue next + } + } + + // range over all the mounts in the config + for _, mount := range p.PluginObj.Config.Mounts { + // found the mount in the config + if mount.Name == s.name { + // is it settable? + if ok, err := s.isSettable(allowedSettableFieldsMounts, mount.Settable); err != nil { + return err + } else if !ok { + return fmt.Errorf("%q is not settable", s.prettyName()) + } + + // it is, so let's update the settings in memory + if mount.Source == nil { + return fmt.Errorf("Plugin config has no mount source") + } + *mount.Source = s.value + continue next + } + } + + // range over all the devices in the config + for _, device := range p.PluginObj.Config.Linux.Devices { + // found the device in the config + if device.Name == s.name { + // is it settable? + if ok, err := s.isSettable(allowedSettableFieldsDevices, device.Settable); err != nil { + return err + } else if !ok { + return fmt.Errorf("%q is not settable", s.prettyName()) + } + + // it is, so let's update the settings in memory + if device.Path == nil { + return fmt.Errorf("Plugin config has no device path") + } + *device.Path = s.value + continue next + } + } + + // found the name in the config + if p.PluginObj.Config.Args.Name == s.name { + // is it settable? + if ok, err := s.isSettable(allowedSettableFieldsArgs, p.PluginObj.Config.Args.Settable); err != nil { + return err + } else if !ok { + return fmt.Errorf("%q is not settable", s.prettyName()) + } + + // it is, so let's update the settings in memory + p.PluginObj.Settings.Args = strings.Split(s.value, " ") + continue next + } + + return fmt.Errorf("setting %q not found in the plugin configuration", s.name) + } + + return nil +} + +// IsEnabled returns the active state of the plugin. +func (p *Plugin) IsEnabled() bool { + p.mu.RLock() + defer p.mu.RUnlock() + + return p.PluginObj.Enabled +} + +// GetID returns the plugin's ID. +func (p *Plugin) GetID() string { + p.mu.RLock() + defer p.mu.RUnlock() + + return p.PluginObj.ID +} + +// GetSocket returns the plugin socket. +func (p *Plugin) GetSocket() string { + p.mu.RLock() + defer p.mu.RUnlock() + + return p.PluginObj.Config.Interface.Socket +} + +// GetTypes returns the interface types of a plugin. +func (p *Plugin) GetTypes() []types.PluginInterfaceType { + p.mu.RLock() + defer p.mu.RUnlock() + + return p.PluginObj.Config.Interface.Types +} + +// GetRefCount returns the reference count. +func (p *Plugin) GetRefCount() int { + p.mu.RLock() + defer p.mu.RUnlock() + + return p.refCount +} + +// AddRefCount adds to reference count. +func (p *Plugin) AddRefCount(count int) { + p.mu.Lock() + defer p.mu.Unlock() + + p.refCount += count +} + +// Acquire increments the plugin's reference count +// This should be followed up by `Release()` when the plugin is no longer in use. +func (p *Plugin) Acquire() { + p.AddRefCount(plugingetter.Acquire) +} + +// Release decrements the plugin's reference count +// This should only be called when the plugin is no longer in use, e.g.
after it was acquired +// via `Acquire()` or getter.Get("name", "type", plugingetter.Acquire). +func (p *Plugin) Release() { + p.AddRefCount(plugingetter.Release) +} + +// SetSpecOptModifier sets the function to use to modify the generated +// runtime spec. +func (p *Plugin) SetSpecOptModifier(f func(*specs.Spec)) { + p.mu.Lock() + p.modifyRuntimeSpec = f + p.mu.Unlock() +} + +// Timeout gets the currently configured connection timeout. +// This should be used when dialing the plugin. +func (p *Plugin) Timeout() time.Duration { + p.mu.RLock() + t := p.timeout + p.mu.RUnlock() + return t +} + +// SetTimeout sets the timeout to use for dialing. +func (p *Plugin) SetTimeout(t time.Duration) { + p.mu.Lock() + p.timeout = t + p.mu.Unlock() +} + +// Addr returns the net.Addr to use to connect to the plugin socket +func (p *Plugin) Addr() net.Addr { + p.mu.RLock() + addr := p.addr + p.mu.RUnlock() + return addr +} + +// SetAddr sets the plugin address which can be used for dialing the plugin. +func (p *Plugin) SetAddr(addr net.Addr) { + p.mu.Lock() + p.addr = addr + p.mu.Unlock() +} + +// Protocol is the protocol that should be used for interacting with the plugin. +func (p *Plugin) Protocol() string { + if p.PluginObj.Config.Interface.ProtocolScheme != "" { + return p.PluginObj.Config.Interface.ProtocolScheme + } + return plugins.ProtocolSchemeHTTPV1 +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/plugin/v2/plugin_linux.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/plugin/v2/plugin_linux.go new file mode 100644 index 000000000..58c432fcd --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/plugin/v2/plugin_linux.go @@ -0,0 +1,141 @@ +package v2 // import "github.com/docker/docker/plugin/v2" + +import ( + "os" + "path/filepath" + "runtime" + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/oci" + "github.com/docker/docker/pkg/system" + "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" +) + +// InitSpec creates an OCI spec from the plugin's config. +func (p *Plugin) InitSpec(execRoot string) (*specs.Spec, error) { + s := oci.DefaultSpec() + + s.Root = &specs.Root{ + Path: p.Rootfs, + Readonly: false, // TODO: all plugins should be readonly? settable in config?
+ } + + userMounts := make(map[string]struct{}, len(p.PluginObj.Settings.Mounts)) + for _, m := range p.PluginObj.Settings.Mounts { + userMounts[m.Destination] = struct{}{} + } + + execRoot = filepath.Join(execRoot, p.PluginObj.ID) + if err := os.MkdirAll(execRoot, 0700); err != nil { + return nil, errors.WithStack(err) + } + + if p.PluginObj.Config.PropagatedMount != "" { + pRoot := filepath.Join(filepath.Dir(p.Rootfs), "propagated-mount") + s.Mounts = append(s.Mounts, specs.Mount{ + Source: pRoot, + Destination: p.PluginObj.Config.PropagatedMount, + Type: "bind", + Options: []string{"rbind", "rw", "rshared"}, + }) + s.Linux.RootfsPropagation = "rshared" + } + + mounts := append(p.PluginObj.Config.Mounts, types.PluginMount{ + Source: &execRoot, + Destination: defaultPluginRuntimeDestination, + Type: "bind", + Options: []string{"rbind", "rshared"}, + }) + + if p.PluginObj.Config.Network.Type != "" { + // TODO: if net == bridge, use libnetwork controller to create a new plugin-specific bridge, bind mount /etc/hosts and /etc/resolv.conf look at the docker code (allocateNetwork, initialize) + if p.PluginObj.Config.Network.Type == "host" { + oci.RemoveNamespace(&s, specs.LinuxNamespaceType("network")) + } + etcHosts := "/etc/hosts" + resolvConf := "/etc/resolv.conf" + mounts = append(mounts, + types.PluginMount{ + Source: &etcHosts, + Destination: etcHosts, + Type: "bind", + Options: []string{"rbind", "ro"}, + }, + types.PluginMount{ + Source: &resolvConf, + Destination: resolvConf, + Type: "bind", + Options: []string{"rbind", "ro"}, + }) + } + if p.PluginObj.Config.PidHost { + oci.RemoveNamespace(&s, specs.LinuxNamespaceType("pid")) + } + + if p.PluginObj.Config.IpcHost { + oci.RemoveNamespace(&s, specs.LinuxNamespaceType("ipc")) + } + + for _, mnt := range mounts { + m := specs.Mount{ + Destination: mnt.Destination, + Type: mnt.Type, + Options: mnt.Options, + } + if mnt.Source == nil { + return nil, errors.New("mount source is not specified") + } + m.Source = *mnt.Source + s.Mounts = append(s.Mounts, m) + } + + for i, m := range s.Mounts { + if strings.HasPrefix(m.Destination, "/dev/") { + if _, ok := userMounts[m.Destination]; ok { + s.Mounts = append(s.Mounts[:i], s.Mounts[i+1:]...) + } + } + } + + if p.PluginObj.Config.Linux.AllowAllDevices { + s.Linux.Resources.Devices = []specs.LinuxDeviceCgroup{{Allow: true, Access: "rwm"}} + } + for _, dev := range p.PluginObj.Settings.Devices { + path := *dev.Path + d, dPermissions, err := oci.DevicesFromPath(path, path, "rwm") + if err != nil { + return nil, errors.WithStack(err) + } + s.Linux.Devices = append(s.Linux.Devices, d...) + s.Linux.Resources.Devices = append(s.Linux.Resources.Devices, dPermissions...) + } + + envs := make([]string, 1, len(p.PluginObj.Settings.Env)+1) + envs[0] = "PATH=" + system.DefaultPathEnv(runtime.GOOS) + envs = append(envs, p.PluginObj.Settings.Env...) + + args := append(p.PluginObj.Config.Entrypoint, p.PluginObj.Settings.Args...) + cwd := p.PluginObj.Config.WorkDir + if len(cwd) == 0 { + cwd = "/" + } + s.Process.Terminal = false + s.Process.Args = args + s.Process.Cwd = cwd + s.Process.Env = envs + + caps := s.Process.Capabilities + caps.Bounding = append(caps.Bounding, p.PluginObj.Config.Linux.Capabilities...) + caps.Permitted = append(caps.Permitted, p.PluginObj.Config.Linux.Capabilities...) + caps.Inheritable = append(caps.Inheritable, p.PluginObj.Config.Linux.Capabilities...) + caps.Effective = append(caps.Effective, p.PluginObj.Config.Linux.Capabilities...) 
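+ // (the four appends above grant the plugin's configured Linux capabilities + // in every capability set: bounding, permitted, inheritable and effective)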
+ + if p.modifyRuntimeSpec != nil { + p.modifyRuntimeSpec(&s) + } + + return &s, nil +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/plugin/v2/plugin_unsupported.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/plugin/v2/plugin_unsupported.go new file mode 100644 index 000000000..5242fe124 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/plugin/v2/plugin_unsupported.go @@ -0,0 +1,14 @@ +// +build !linux + +package v2 // import "github.com/docker/docker/plugin/v2" + +import ( + "errors" + + "github.com/opencontainers/runtime-spec/specs-go" +) + +// InitSpec creates an OCI spec from the plugin's config. +func (p *Plugin) InitSpec(execRoot string) (*specs.Spec, error) { + return nil, errors.New("not supported") +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/plugin/v2/settable.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/plugin/v2/settable.go new file mode 100644 index 000000000..efda56470 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/plugin/v2/settable.go @@ -0,0 +1,102 @@ +package v2 // import "github.com/docker/docker/plugin/v2" + +import ( + "errors" + "fmt" + "strings" +) + +type settable struct { + name string + field string + value string +} + +var ( + allowedSettableFieldsEnv = []string{"value"} + allowedSettableFieldsArgs = []string{"value"} + allowedSettableFieldsDevices = []string{"path"} + allowedSettableFieldsMounts = []string{"source"} + + errMultipleFields = errors.New("multiple fields are settable, one must be specified") + errInvalidFormat = errors.New("invalid format, must be <name>[.<field>][=<value>]") +) + +func newSettables(args []string) ([]settable, error) { + sets := make([]settable, 0, len(args)) + for _, arg := range args { + set, err := newSettable(arg) + if err != nil { + return nil, err + } + sets = append(sets, set) + } + return sets, nil +} + +func newSettable(arg string) (settable, error) { + var set settable + if i := strings.Index(arg, "="); i == 0 { + return set, errInvalidFormat + } else if i < 0 { + set.name = arg + } else { + set.name = arg[:i] + set.value = arg[i+1:] + } + + if i := strings.LastIndex(set.name, "."); i > 0 { + set.field = set.name[i+1:] + set.name = arg[:i] + } + + return set, nil +} + +// prettyName returns name.field if there is a field, otherwise name. +func (set *settable) prettyName() string { + if set.field != "" { + return fmt.Sprintf("%s.%s", set.name, set.field) + } + return set.name +} + +func (set *settable) isSettable(allowedSettableFields []string, settable []string) (bool, error) { + if set.field == "" { + if len(settable) == 1 { + // if field is not specified and there is only one settable, default to it.
+ set.field = settable[0] + } else if len(settable) > 1 { + return false, errMultipleFields + } + } + + isAllowed := false + for _, allowedSettableField := range allowedSettableFields { + if set.field == allowedSettableField { + isAllowed = true + break + } + } + + if isAllowed { + for _, settableField := range settable { + if set.field == settableField { + return true, nil + } + } + } + + return false, nil +} + +func updateSettingsEnv(env *[]string, set *settable) { + for i, e := range *env { + if parts := strings.SplitN(e, "=", 2); parts[0] == set.name { + (*env)[i] = fmt.Sprintf("%s=%s", set.name, set.value) + return + } + } + + *env = append(*env, fmt.Sprintf("%s=%s", set.name, set.value)) +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/reference/errors.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/reference/errors.go new file mode 100644 index 000000000..2d294c672 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/reference/errors.go @@ -0,0 +1,25 @@ +package reference // import "github.com/docker/docker/reference" + +type notFoundError string + +func (e notFoundError) Error() string { + return string(e) +} + +func (notFoundError) NotFound() {} + +type invalidTagError string + +func (e invalidTagError) Error() string { + return string(e) +} + +func (invalidTagError) InvalidParameter() {} + +type conflictingTagError string + +func (e conflictingTagError) Error() string { + return string(e) +} + +func (conflictingTagError) Conflict() {} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/reference/store.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/reference/store.go new file mode 100644 index 000000000..b942c42ca --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/reference/store.go @@ -0,0 +1,348 @@ +package reference // import "github.com/docker/docker/reference" + +import ( + "encoding/json" + "fmt" + "os" + "path/filepath" + "sort" + "sync" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/pkg/ioutils" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +var ( + // ErrDoesNotExist is returned if a reference is not found in the + // store. + ErrDoesNotExist notFoundError = "reference does not exist" +) + +// An Association is a tuple associating a reference with an image ID. +type Association struct { + Ref reference.Named + ID digest.Digest +} + +// Store provides the set of methods which can operate on a reference store. +type Store interface { + References(id digest.Digest) []reference.Named + ReferencesByName(ref reference.Named) []Association + AddTag(ref reference.Named, id digest.Digest, force bool) error + AddDigest(ref reference.Canonical, id digest.Digest, force bool) error + Delete(ref reference.Named) (bool, error) + Get(ref reference.Named) (digest.Digest, error) +} + +type store struct { + mu sync.RWMutex + // jsonPath is the path to the file where the serialized tag data is + // stored. + jsonPath string + // Repositories is a map of repositories, indexed by name. + Repositories map[string]repository + // referencesByIDCache is a cache of references indexed by ID, to speed + // up References. + referencesByIDCache map[digest.Digest]map[string]reference.Named +} + +// Repository maps tags to digests. The key is a stringified Reference, +// including the repository name. 
+type repository map[string]digest.Digest + +type lexicalRefs []reference.Named + +func (a lexicalRefs) Len() int { return len(a) } +func (a lexicalRefs) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a lexicalRefs) Less(i, j int) bool { + return a[i].String() < a[j].String() +} + +type lexicalAssociations []Association + +func (a lexicalAssociations) Len() int { return len(a) } +func (a lexicalAssociations) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a lexicalAssociations) Less(i, j int) bool { + return a[i].Ref.String() < a[j].Ref.String() +} + +// NewReferenceStore creates a new reference store, tied to a file path where +// the set of references are serialized in JSON format. +func NewReferenceStore(jsonPath string) (Store, error) { + abspath, err := filepath.Abs(jsonPath) + if err != nil { + return nil, err + } + + store := &store{ + jsonPath: abspath, + Repositories: make(map[string]repository), + referencesByIDCache: make(map[digest.Digest]map[string]reference.Named), + } + // Load the json file if it exists, otherwise create it. + if err := store.reload(); os.IsNotExist(err) { + if err := store.save(); err != nil { + return nil, err + } + } else if err != nil { + return nil, err + } + return store, nil +} + +// AddTag adds a tag reference to the store. If force is set to true, existing +// references can be overwritten. This only works for tags, not digests. +func (store *store) AddTag(ref reference.Named, id digest.Digest, force bool) error { + if _, isCanonical := ref.(reference.Canonical); isCanonical { + return errors.WithStack(invalidTagError("refusing to create a tag with a digest reference")) + } + return store.addReference(reference.TagNameOnly(ref), id, force) +} + +// AddDigest adds a digest reference to the store. +func (store *store) AddDigest(ref reference.Canonical, id digest.Digest, force bool) error { + return store.addReference(ref, id, force) +} + +func favorDigest(originalRef reference.Named) (reference.Named, error) { + ref := originalRef + // If the reference includes a digest and a tag, we must store only the + // digest. + canonical, isCanonical := originalRef.(reference.Canonical) + _, isNamedTagged := originalRef.(reference.NamedTagged) + + if isCanonical && isNamedTagged { + trimmed, err := reference.WithDigest(reference.TrimNamed(canonical), canonical.Digest()) + if err != nil { + // should never happen + return originalRef, err + } + ref = trimmed + } + return ref, nil +} + +func (store *store) addReference(ref reference.Named, id digest.Digest, force bool) error { + ref, err := favorDigest(ref) + if err != nil { + return err + } + + refName := reference.FamiliarName(ref) + refStr := reference.FamiliarString(ref) + + if refName == string(digest.Canonical) { + return errors.WithStack(invalidTagError("refusing to create an ambiguous tag using digest algorithm as name")) + } + + store.mu.Lock() + defer store.mu.Unlock() + + repository, exists := store.Repositories[refName] + if !exists || repository == nil { + repository = make(map[string]digest.Digest) + store.Repositories[refName] = repository + } + + oldID, exists := repository[refStr] + + if exists { + if oldID == id { + // Nothing to do. The caller may have checked for this using store.Get in advance, but store.mu was unlocked in the meantime, so this can legitimately happen nevertheless. 
+ return nil + } + + // force only works for tags + if digested, isDigest := ref.(reference.Canonical); isDigest { + return errors.WithStack(conflictingTagError("Cannot overwrite digest " + digested.Digest().String())) + } + + if !force { + return errors.WithStack( + conflictingTagError( + fmt.Sprintf("Conflict: Tag %s is already set to image %s, if you want to replace it, please use the force option", refStr, oldID.String()), + ), + ) + } + + if store.referencesByIDCache[oldID] != nil { + delete(store.referencesByIDCache[oldID], refStr) + if len(store.referencesByIDCache[oldID]) == 0 { + delete(store.referencesByIDCache, oldID) + } + } + } + + repository[refStr] = id + if store.referencesByIDCache[id] == nil { + store.referencesByIDCache[id] = make(map[string]reference.Named) + } + store.referencesByIDCache[id][refStr] = ref + + return store.save() +} + +// Delete deletes a reference from the store. It returns true if a deletion +// happened, or false otherwise. +func (store *store) Delete(ref reference.Named) (bool, error) { + ref, err := favorDigest(ref) + if err != nil { + return false, err + } + + ref = reference.TagNameOnly(ref) + + refName := reference.FamiliarName(ref) + refStr := reference.FamiliarString(ref) + + store.mu.Lock() + defer store.mu.Unlock() + + repository, exists := store.Repositories[refName] + if !exists { + return false, ErrDoesNotExist + } + + if id, exists := repository[refStr]; exists { + delete(repository, refStr) + if len(repository) == 0 { + delete(store.Repositories, refName) + } + if store.referencesByIDCache[id] != nil { + delete(store.referencesByIDCache[id], refStr) + if len(store.referencesByIDCache[id]) == 0 { + delete(store.referencesByIDCache, id) + } + } + return true, store.save() + } + + return false, ErrDoesNotExist +} + +// Get retrieves an item from the store by reference +func (store *store) Get(ref reference.Named) (digest.Digest, error) { + if canonical, ok := ref.(reference.Canonical); ok { + // If reference contains both tag and digest, only + // lookup by digest as it takes precedence over + // tag, until tag/digest combos are stored. + if _, ok := ref.(reference.Tagged); ok { + var err error + ref, err = reference.WithDigest(reference.TrimNamed(canonical), canonical.Digest()) + if err != nil { + return "", err + } + } + } else { + ref = reference.TagNameOnly(ref) + } + + refName := reference.FamiliarName(ref) + refStr := reference.FamiliarString(ref) + + store.mu.RLock() + defer store.mu.RUnlock() + + repository, exists := store.Repositories[refName] + if !exists || repository == nil { + return "", ErrDoesNotExist + } + + id, exists := repository[refStr] + if !exists { + return "", ErrDoesNotExist + } + + return id, nil +} + +// References returns a slice of references to the given ID. The slice +// will be nil if there are no references to this ID. +func (store *store) References(id digest.Digest) []reference.Named { + store.mu.RLock() + defer store.mu.RUnlock() + + // Convert the internal map to an array for two reasons: + // 1) We must not return a mutable reference to the internal map + // 2) It would be ugly to expose the extraneous map keys to callers. + + var references []reference.Named + for _, ref := range store.referencesByIDCache[id] { + references = append(references, ref) + } + + sort.Sort(lexicalRefs(references)) + + return references +} + +// ReferencesByName returns the references for a given repository name. +// If there are no references known for this repository name, +// ReferencesByName returns nil.
+func (store *store) ReferencesByName(ref reference.Named) []Association { + refName := reference.FamiliarName(ref) + + store.mu.RLock() + defer store.mu.RUnlock() + + repository, exists := store.Repositories[refName] + if !exists { + return nil + } + + var associations []Association + for refStr, refID := range repository { + ref, err := reference.ParseNormalizedNamed(refStr) + if err != nil { + // Should never happen + return nil + } + associations = append(associations, + Association{ + Ref: ref, + ID: refID, + }) + } + + sort.Sort(lexicalAssociations(associations)) + + return associations +} + +func (store *store) save() error { + // Store the json + jsonData, err := json.Marshal(store) + if err != nil { + return err + } + return ioutils.AtomicWriteFile(store.jsonPath, jsonData, 0600) +} + +func (store *store) reload() error { + f, err := os.Open(store.jsonPath) + if err != nil { + return err + } + defer f.Close() + if err := json.NewDecoder(f).Decode(&store); err != nil { + return err + } + + for _, repository := range store.Repositories { + for refStr, refID := range repository { + ref, err := reference.ParseNormalizedNamed(refStr) + if err != nil { + // Should never happen + continue + } + if store.referencesByIDCache[refID] == nil { + store.referencesByIDCache[refID] = make(map[string]reference.Named) + } + store.referencesByIDCache[refID][refStr] = ref + } + } + + return nil +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/registry/auth.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/registry/auth.go index 1f2043a0d..3f58fc6cf 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/registry/auth.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/registry/auth.go @@ -248,7 +248,6 @@ func (err PingResponseError) Error() string { // challenge manager for the supported authentication types and // whether v2 was confirmed by the response. If a response is received but // cannot be interpreted a PingResponseError will be returned. -// nolint: interfacer func PingV2Registry(endpoint *url.URL, transport http.RoundTripper) (challenge.Manager, bool, error) { var ( foundV2 = false diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/registry/config.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/registry/config.go index de5a526b6..6bb9258c9 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/registry/config.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/registry/config.go @@ -19,16 +19,11 @@ type ServiceOptions struct { AllowNondistributableArtifacts []string `json:"allow-nondistributable-artifacts,omitempty"` Mirrors []string `json:"registry-mirrors,omitempty"` InsecureRegistries []string `json:"insecure-registries,omitempty"` - - // V2Only controls access to legacy registries. If it is set to true via the - // command line flag the daemon will not attempt to contact v1 legacy registries - V2Only bool `json:"disable-legacy-registry,omitempty"` } // serviceConfig holds daemon configuration for the registry service. type serviceConfig struct { registrytypes.ServiceConfig - V2Only bool } var ( @@ -76,7 +71,6 @@ func newServiceConfig(options ServiceOptions) (*serviceConfig, error) { // Hack: Bypass setting the mirrors to IndexConfigs since they are going away // and Mirrors are only for the official registry anyways. 
}, - V2Only: options.V2Only, } if err := config.LoadAllowNondistributableArtifacts(options.AllowNondistributableArtifacts); err != nil { return nil, err diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/registry/registry.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/registry/registry.go index 7a84bbfb7..6727b7dc3 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/registry/registry.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/registry/registry.go @@ -145,7 +145,7 @@ func trustedLocation(req *http.Request) bool { // addRequiredHeadersToRedirectedRequests adds the necessary redirection headers // for redirected requests func addRequiredHeadersToRedirectedRequests(req *http.Request, via []*http.Request) error { - if via != nil && via[0] != nil { + if len(via) != 0 && via[0] != nil { if trustedLocation(req) && trustedLocation(via[0]) { req.Header = via[0].Header return nil diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/registry/service.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/registry/service.go index b441970ff..08f5c7a4e 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/registry/service.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/registry/service.go @@ -309,20 +309,5 @@ func (s *DefaultService) LookupPushEndpoints(hostname string) (endpoints []APIEn } func (s *DefaultService) lookupEndpoints(hostname string) (endpoints []APIEndpoint, err error) { - endpoints, err = s.lookupV2Endpoints(hostname) - if err != nil { - return nil, err - } - - if s.config.V2Only { - return endpoints, nil - } - - legacyEndpoints, err := s.lookupV1Endpoints(hostname) - if err != nil { - return nil, err - } - endpoints = append(endpoints, legacyEndpoints...) - - return endpoints, nil + return s.lookupV2Endpoints(hostname) } diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/registry/service_v1.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/registry/service_v1.go deleted file mode 100644 index d955ec51f..000000000 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/registry/service_v1.go +++ /dev/null @@ -1,40 +0,0 @@ -package registry // import "github.com/docker/docker/registry" - -import "net/url" - -func (s *DefaultService) lookupV1Endpoints(hostname string) (endpoints []APIEndpoint, err error) { - if hostname == DefaultNamespace || hostname == DefaultV2Registry.Host || hostname == IndexHostname { - return []APIEndpoint{}, nil - } - - tlsConfig, err := s.tlsConfig(hostname) - if err != nil { - return nil, err - } - - endpoints = []APIEndpoint{ - { - URL: &url.URL{ - Scheme: "https", - Host: hostname, - }, - Version: APIVersion1, - TrimHostname: true, - TLSConfig: tlsConfig, - }, - } - - if tlsConfig.InsecureSkipVerify { - endpoints = append(endpoints, APIEndpoint{ // or this - URL: &url.URL{ - Scheme: "http", - Host: hostname, - }, - Version: APIVersion1, - TrimHostname: true, - // used to check if supposed to be secure via InsecureSkipVerify - TLSConfig: tlsConfig, - }) - } - return endpoints, nil -} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/registry/service_v2.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/registry/service_v2.go index 3a56dc911..1a4c9e310 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/registry/service_v2.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/registry/service_v2.go @@ -57,7 +57,7 @@ func (s *DefaultService) lookupV2Endpoints(hostname string) (endpoints []APIEndp Scheme: "https", Host: hostname, }, - Version: APIVersion2, 
+ Version: APIVersion2, AllowNondistributableArtifacts: ana, TrimHostname: true, TLSConfig: tlsConfig, @@ -70,7 +70,7 @@ func (s *DefaultService) lookupV2Endpoints(hostname string) (endpoints []APIEndp Scheme: "http", Host: hostname, }, - Version: APIVersion2, + Version: APIVersion2, AllowNondistributableArtifacts: ana, TrimHostname: true, // used to check if supposed to be secure via InsecureSkipVerify diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/vendor.conf b/src/cmd/linuxkit/vendor/github.com/docker/docker/vendor.conf index 33e2dc844..3bb2cc43d 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/vendor.conf +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/vendor.conf @@ -1,164 +1,166 @@ -# the following lines are in sorted order, FYI -github.com/Azure/go-ansiterm d6e3b3328b783f23731bc4d058875b0371ff8109 -github.com/Microsoft/hcsshim v0.6.11 -github.com/Microsoft/go-winio v0.4.8 -github.com/docker/libtrust 9cbd2a1374f46905c68a4eb3694a130610adc62a -github.com/go-check/check 4ed411733c5785b40214c70bce814c3a3a689609 https://github.com/cpuguy83/check.git -github.com/golang/gddo 9b12a26f3fbd7397dee4e20939ddca719d840d2a -github.com/gorilla/context v1.1 -github.com/gorilla/mux v1.1 -github.com/Microsoft/opengcs v0.3.6 -github.com/kr/pty 5cf931ef8f -github.com/mattn/go-shellwords v1.0.3 -github.com/sirupsen/logrus v1.0.3 -github.com/tchap/go-patricia v2.2.6 -github.com/vdemeester/shakers 24d7f1d6a71aa5d9cbe7390e4afb66b7eef9e1b3 -golang.org/x/net 0ed95abb35c445290478a5348a7b38bb154135fd -golang.org/x/sys 37707fdb30a5b38865cfb95e5aab41707daec7fd -github.com/docker/go-units 9e638d38cf6977a37a8ea0078f3ee75a7cdb2dd1 -github.com/docker/go-connections 7beb39f0b969b075d1325fecb092faf27fd357b6 -golang.org/x/text f72d8390a633d5dfb0cc84043294db9f6c935756 -gotest.tools v2.1.0 -github.com/google/go-cmp v0.2.0 +github.com/Azure/go-ansiterm d6e3b3328b783f23731bc4d058875b0371ff8109 +github.com/Microsoft/hcsshim 672e52e9209d1e53718c1b6a7d68cc9272654ab5 +github.com/Microsoft/go-winio 6c72808b55902eae4c5943626030429ff20f3b63 # v0.4.14 +github.com/docker/libtrust 9cbd2a1374f46905c68a4eb3694a130610adc62a +github.com/golang/gddo 9b12a26f3fbd7397dee4e20939ddca719d840d2a +github.com/google/uuid 0cd6bf5da1e1c83f8b45653022c74f71af0538a4 # v1.1.1 +github.com/gorilla/mux ed099d42384823742bba0bf9a72b53b55c9e2e38 # v1.7.2 +github.com/Microsoft/opengcs a10967154e143a36014584a6f664344e3bb0aa64 -github.com/RackSec/srslog 456df3a81436d29ba874f3590eeeee25d666f8a5 -github.com/imdario/mergo v0.3.5 -golang.org/x/sync fd80eb99c8f653c847d294a001bdf2a3a6f768f5 +github.com/creack/pty 2769f65a3a94eb8f876f44a0459d24ae7ad2e488 # v1.1.7 +github.com/konsorten/go-windows-terminal-sequences f55edac94c9bbba5d6182a4be46d86a2c9b5b50e # v1.0.2 +github.com/mattn/go-shellwords a72fbe27a1b0ed0df2f02754945044ce1456608b # v1.0.5 +github.com/sirupsen/logrus 8bdbc7bcc01dcbb8ec23dc8a28e332258d25251f # v1.4.1 +github.com/tchap/go-patricia a7f0089c6f496e8e70402f61733606daa326cac5 # v2.3.0 +golang.org/x/net f3200d17e092c607f615320ecaad13d87ad9a2b3 +golang.org/x/sys 4c4f7f33c9ed00de01c4c741d2177abfcfe19307 +github.com/docker/go-units 519db1ee28dcc9fd2474ae59fca29a810482bfb1 # v0.4.0 +github.com/docker/go-connections 7395e3f8aa162843a74ed6d48e79627d9792ac55 # v0.4.0 +golang.org/x/text f21a4dfb5e38f5895301dc265a8def02365cc3d0 # v0.3.0 +gotest.tools 1083505acf35a0bd8a696b26837e1fb3187a7a83 # v2.3.0 +github.com/google/go-cmp 3af367b6b30c263d47e8895973edcca9a49cf029 # v0.2.0 +github.com/syndtr/gocapability 
d98352740cb2c55f81556b63d4a1ec64c5a319c2 + +github.com/RackSec/srslog a4725f04ec91af1a91b380da679d6e0c2f061e59 +github.com/imdario/mergo 7c29201646fa3de8506f701213473dd407f19646 # v0.3.7 +golang.org/x/sync e225da77a7e68af35c70ccbf71af2b83e6acac3c # buildkit -github.com/moby/buildkit 9acf51e49185b348608e0096b2903dd72907adcb -github.com/tonistiigi/fsutil 8abad97ee3969cdf5e9c367f46adba2c212b3ddb -github.com/grpc-ecosystem/grpc-opentracing 8e809c8a86450a29b90dcc9efbf062d0fe6d9746 -github.com/opentracing/opentracing-go 1361b9cd60be79c4c3a7fa9841b3c132e40066a7 -github.com/google/shlex 6f45313302b9c56850fc17f99e40caebce98c716 -github.com/opentracing-contrib/go-stdlib b1a47cfbdd7543e70e9ef3e73d0802ad306cc1cc -github.com/mitchellh/hashstructure 2bca23e0e452137f789efbc8610126fd8b94f73b +github.com/moby/buildkit 928f3b480d7460aacb401f68610058ffdb549aca +github.com/tonistiigi/fsutil 3d2716dd0a4d06ff854241c7e8b6f3f904e1719f +github.com/grpc-ecosystem/grpc-opentracing 8e809c8a86450a29b90dcc9efbf062d0fe6d9746 +github.com/opentracing/opentracing-go 1361b9cd60be79c4c3a7fa9841b3c132e40066a7 +github.com/google/shlex 6f45313302b9c56850fc17f99e40caebce98c716 +github.com/opentracing-contrib/go-stdlib b1a47cfbdd7543e70e9ef3e73d0802ad306cc1cc +github.com/mitchellh/hashstructure 2bca23e0e452137f789efbc8610126fd8b94f73b +github.com/gofrs/flock 7f43ea2e6a643ad441fc12d0ecc0d3388b300c53 # v0.7.0 -#get libnetwork packages +# libnetwork -# When updating, also update LIBNETWORK_COMMIT in hack/dockerfile/install/proxy accordingly -github.com/docker/libnetwork 430c00a6a6b3dfdd774f21e1abd4ad6b0216c629 -github.com/docker/go-events 9461782956ad83b30282bf90e31fa6a70c255ba9 -github.com/armon/go-radix e39d623f12e8e41c7b5529e9a9dd67a1e2261f80 -github.com/armon/go-metrics eb0af217e5e9747e41dd5303755356b62d28e3ec -github.com/hashicorp/go-msgpack 71c2886f5a673a35f909803f38ece5810165097b -github.com/hashicorp/memberlist 3d8438da9589e7b608a83ffac1ef8211486bcb7c -github.com/sean-/seed e2103e2c35297fb7e17febb81e49b312087a2372 -github.com/hashicorp/go-sockaddr 6d291a969b86c4b633730bfc6b8b9d64c3aafed9 -github.com/hashicorp/go-multierror fcdddc395df1ddf4247c69bd436e84cfa0733f7e -github.com/hashicorp/serf 598c54895cc5a7b1a24a398d635e8c0ea0959870 -github.com/docker/libkv 1d8431073ae03cdaedb198a89722f3aab6d418ef -github.com/vishvananda/netns 604eaf189ee867d8c147fafc28def2394e878d25 -github.com/vishvananda/netlink b2de5d10e38ecce8607e6b438b6d174f389a004e +# When updating, also update LIBNETWORK_COMMIT in hack/dockerfile/install/proxy.installer accordingly +github.com/docker/libnetwork 3eb39382bfa6a3c42f83674ab080ae13b0e34e5d # bump_19.03 branch +github.com/docker/go-events 9461782956ad83b30282bf90e31fa6a70c255ba9 +github.com/armon/go-radix e39d623f12e8e41c7b5529e9a9dd67a1e2261f80 +github.com/armon/go-metrics eb0af217e5e9747e41dd5303755356b62d28e3ec +github.com/hashicorp/go-msgpack 71c2886f5a673a35f909803f38ece5810165097b +github.com/hashicorp/memberlist 3d8438da9589e7b608a83ffac1ef8211486bcb7c +github.com/sean-/seed e2103e2c35297fb7e17febb81e49b312087a2372 +github.com/hashicorp/errwrap 8a6fb523712970c966eefc6b39ed2c5e74880354 # v1.0.0 +github.com/hashicorp/go-sockaddr c7188e74f6acae5a989bdc959aa779f8b9f42faf # v1.0.2 +github.com/hashicorp/go-multierror 886a7fbe3eb1c874d46f623bfa70af45f425b3d1 # v1.0.0 +github.com/hashicorp/serf 598c54895cc5a7b1a24a398d635e8c0ea0959870 +github.com/docker/libkv 458977154600b9f23984d9f4b82e79570b5ae12b +github.com/vishvananda/netns 7109fa855b0ff1ebef7fbd2f6aa613e8db7cfbc0 +github.com/vishvananda/netlink 
a2ad57a690f3caf3015351d2d6e1c0b95c349752 -# When updating, consider updating TOMLV_COMMIT in hack/dockerfile/install/tomlv accordingly -github.com/BurntSushi/toml a368813c5e648fee92e5f6c30e3944ff9d5e8895 -github.com/samuel/go-zookeeper d0e0d8e11f318e000a8cc434616d69e329edc374 -github.com/deckarep/golang-set ef32fa3046d9f249d399f98ebaf9be944430fd1d -github.com/coreos/etcd v3.2.1 -github.com/coreos/go-semver v0.2.0 -github.com/ugorji/go f1f1a805ed361a0e078bb537e4ea78cd37dcf065 -github.com/hashicorp/consul v0.5.2 -github.com/boltdb/bolt fff57c100f4dea1905678da7e90d92429dff2904 -github.com/miekg/dns v1.0.7 -github.com/ishidawataru/sctp 07191f837fedd2f13d1ec7b5f885f0f3ec54b1cb +# When updating, consider updating TOMLV_COMMIT in hack/dockerfile/install/tomlv.installer accordingly +github.com/BurntSushi/toml 3012a1dbe2e4bd1391d42b32f0577cb7bbc7f005 # v0.3.1 +github.com/samuel/go-zookeeper d0e0d8e11f318e000a8cc434616d69e329edc374 +github.com/deckarep/golang-set ef32fa3046d9f249d399f98ebaf9be944430fd1d +github.com/coreos/etcd d57e8b8d97adfc4a6c224fe116714bf1a1f3beb9 # v3.3.12 +github.com/coreos/go-semver 8ab6407b697782a06568d4b7f1db25550ec2e4c6 # v0.2.0 +github.com/ugorji/go b4c50a2b199d93b13dc15e78929cfb23bfdf21ab # v1.1.1 +github.com/hashicorp/consul 9a9cc9341bb487651a0399e3fc5e1e8a42e62dd9 # v0.5.2 +github.com/miekg/dns e57bf427e68187a27e22adceac868350d7a7079b # v1.0.7 +github.com/ishidawataru/sctp 6e2cb1366111dcf547c13531e3a263a067715847 +go.etcd.io/bbolt a0458a2b35708eef59eb5f620ceb3cd1c01a824d # v1.3.3 # get graph and distribution packages -github.com/docker/distribution 83389a148052d74ac602f5f1d62f86ff2f3c4aa5 -github.com/vbatts/tar-split v0.10.2 -github.com/opencontainers/go-digest v1.0.0-rc1 +github.com/docker/distribution 0d3efadf0154c2b8a4e7b6621fff9809655cc580 +github.com/vbatts/tar-split 620714a4c508c880ac1bdda9c8370a2b19af1a55 # v0.11.0 +github.com/opencontainers/go-digest 279bed98673dd5bef374d3b6e4b09e2af76183bf # v1.0.0-rc1 # get go-zfs packages -github.com/mistifyio/go-zfs 22c9b32c84eb0d0c6f4043b6e90fc94073de92fa -github.com/pborman/uuid v1.0 +github.com/mistifyio/go-zfs f784269be439d704d3dfa1906f45dd848fed2beb -google.golang.org/grpc v1.12.0 +google.golang.org/grpc 6eaf6f47437a6b4e2153a190160ef39a92c7eceb # v1.23.0 -# This does not need to match RUNC_COMMIT as it is used for helper packages but should be newer or equal -github.com/opencontainers/runc ad0f5255060d36872be04de22f8731f38ef2d7b1 -github.com/opencontainers/runtime-spec v1.0.1 -github.com/opencontainers/image-spec v1.0.1 -github.com/seccomp/libseccomp-golang 32f571b70023028bd57d9288c20efbcb237f3ce0 +# The version of runc should match the version that is used by the containerd +# version that is used. If you need to update runc, open a pull request in +# the containerd project first, and update both after that is merged. +# This commit does not need to match RUNC_COMMIT as it is used for helper +# packages but should be newer or equal. 
+github.com/opencontainers/runc 3e425f80a8c931f88e6d94a8c831b9d5aa481657 # v1.0.0-rc8-92-g84373aaa +github.com/opencontainers/runtime-spec 29686dbc5559d93fb1ef402eeda3e35c38d75af4 # v1.0.1-59-g29686db +github.com/opencontainers/image-spec d60099175f88c47cd379c4738d158884749ed235 # v1.0.1 +github.com/seccomp/libseccomp-golang 32f571b70023028bd57d9288c20efbcb237f3ce0 -# libcontainer deps (see src/github.com/opencontainers/runc/Godeps/Godeps.json) -github.com/coreos/go-systemd v17 -github.com/godbus/dbus v4.0.0 -github.com/syndtr/gocapability 2c00daeb6c3b45114c80ac44119e7b8801fdd852 -github.com/golang/protobuf v1.1.0 +# systemd integration (journald, daemon/listeners, containerd/cgroups) +github.com/coreos/go-systemd 39ca1b05acc7ad1220e09f133283b8859a8b71ab # v17 +github.com/godbus/dbus 5f6efc7ef2759c81b7ba876593971bfce311eab3 # v4.0.0 # gelf logging driver deps -github.com/Graylog2/go-gelf 4143646226541087117ff2f83334ea48b3201841 +github.com/Graylog2/go-gelf 4143646226541087117ff2f83334ea48b3201841 -github.com/fluent/fluent-logger-golang v1.3.0 # fluent-logger-golang deps -github.com/philhofer/fwd 98c11a7a6ec829d672b03833c3d69a7fae1ca972 -github.com/tinylib/msgp 3b556c64540842d4f82967be066a7f7fffc3adad +github.com/fluent/fluent-logger-golang 7a6c9dcd7f14c2ed5d8c55c11b894e5455ee311b # v1.4.0 +github.com/philhofer/fwd bb6d471dc95d4fe11e432687f8b70ff496cf3136 # v1.0.0 +github.com/tinylib/msgp af6442a0fcf6e2a1b824f70dd0c734f01e817751 # v1.1.0 # fsnotify -github.com/fsnotify/fsnotify 4da3e2cfbabc9f751898f250b49f2439785783a1 +github.com/fsnotify/fsnotify 1485a34d5d5723fea214f5710708e19a831720e4 # v1.4.7-11-g1485a34 # awslogs deps -github.com/aws/aws-sdk-go v1.12.66 -github.com/go-ini/ini v1.25.4 -github.com/jmespath/go-jmespath 0b12d6b521d83fc7f755e7cfc1b1fbdd35a01a74 +github.com/aws/aws-sdk-go 9ed0c8de252f04ac45a65358377103d5a1aa2d92 # v1.12.66 +github.com/go-ini/ini 300e940a926eb277d3901b20bdfcc54928ad3642 # v1.25.4 +github.com/jmespath/go-jmespath 0b12d6b521d83fc7f755e7cfc1b1fbdd35a01a74 # logentries -github.com/bsphere/le_go 7a984a84b5492ae539b79b62fb4a10afc63c7bcf +github.com/bsphere/le_go 7a984a84b5492ae539b79b62fb4a10afc63c7bcf # gcplogs deps -golang.org/x/oauth2 ec22f46f877b4505e0117eeaab541714644fdd28 -google.golang.org/api de943baf05a022a8f921b544b7827bacaba1aed5 -go.opencensus.io v0.11.0 -cloud.google.com/go v0.23.0 -github.com/googleapis/gax-go v2.0.0 -google.golang.org/genproto 694d95ba50e67b2e363f3483057db5d4910c18f9 +golang.org/x/oauth2 ec22f46f877b4505e0117eeaab541714644fdd28 +google.golang.org/api de943baf05a022a8f921b544b7827bacaba1aed5 +go.opencensus.io c3ed530f775d85e577ca652cb052a52c078aad26 # v0.11.0 +cloud.google.com/go 0fd7230b2a7505833d5f69b75cbd6c9582401479 # v0.23.0 +github.com/googleapis/gax-go 317e0006254c44a0ac427cc52a0e083ff0b9622f # v2.0.0 +google.golang.org/genproto 694d95ba50e67b2e363f3483057db5d4910c18f9 # containerd -github.com/containerd/containerd 08f7ee9828af1783dc98cc5cc1739e915697c667 -github.com/containerd/fifo 3d5202aec260678c48179c56f40e6f38a095738c -github.com/containerd/continuity d3c23511c1bf5851696cba83143d9cbcd666869b -github.com/containerd/cgroups fe281dd265766145e943a034aa41086474ea6130 -github.com/containerd/console 9290d21dc56074581f619579c43d970b4514bc08 -github.com/containerd/go-runc f271fa2021de855d4d918dbef83c5fe19db1bdd -github.com/containerd/typeurl f6943554a7e7e88b3c14aad190bf05932da84788 -github.com/stevvooe/ttrpc d4528379866b0ce7e9d71f3eb96f0582fc374577 -github.com/gogo/googleapis 08a7655d27152912db7aaf4f983275eaf8d128ef 
+github.com/containerd/containerd 7c1e88399ec0b0b077121d9d5ad97e647b11c870 +github.com/containerd/fifo a9fb20d87448d386e6d50b1f2e1fa70dcf0de43c +github.com/containerd/continuity aaeac12a7ffcd198ae25440a9dff125c2e2703a7 +github.com/containerd/cgroups 4994991857f9b0ae8dc439551e8bebdbb4bf66c1 +github.com/containerd/console 0650fd9eeb50bab4fc99dceb9f2e14cf58f36e7f +github.com/containerd/go-runc e029b79d8cda8374981c64eba71f28ec38e5526f +github.com/containerd/typeurl 2a93cfde8c20b23de8eb84a5adbc234ddf7a9e8d +github.com/containerd/ttrpc 92c8520ef9f86600c650dd540266a007bf03670f +github.com/gogo/googleapis d31c731455cb061f42baff3bda55bad0118b126b # v1.2.0 # cluster -github.com/docker/swarmkit edd5641391926a50bc5f7040e20b7efc05003c26 -github.com/gogo/protobuf v1.0.0 -github.com/cloudflare/cfssl 7fb22c8cba7ecaf98e4082d22d65800cf45e042a -github.com/fernet/fernet-go 1b2437bc582b3cfbb341ee5a29f8ef5b42912ff2 -github.com/google/certificate-transparency d90e65c3a07988180c5b1ece71791c0b6506826e -golang.org/x/crypto 1a580b3eff7814fc9b40602fd35256c63b50f491 -golang.org/x/time a4bde12657593d5e90d0533a3e4fd95e635124cb -github.com/hashicorp/go-memdb cb9a474f84cc5e41b273b20c6927680b2a8776ad -github.com/hashicorp/go-immutable-radix 826af9ccf0feeee615d546d69b11f8e98da8c8f1 git://github.com/tonistiigi/go-immutable-radix.git -github.com/hashicorp/golang-lru a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4 -github.com/coreos/pkg fa29b1d70f0beaddd4c7021607cc3c3be8ce94b8 -github.com/pivotal-golang/clock 3fd3c1944c59d9742e1cd333672181cd1a6f9fa0 -github.com/prometheus/client_golang 52437c81da6b127a9925d17eb3a382a2e5fd395e -github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9 -github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6 -github.com/prometheus/common ebdfc6da46522d58825777cf1f90490a5b1ef1d8 -github.com/prometheus/procfs abf152e5f3e97f2fafac028d2cc06c1feb87ffa5 -github.com/matttproud/golang_protobuf_extensions v1.0.0 -github.com/pkg/errors 839d9e913e063e28dfd0e6c7b7512793e0a48be9 -github.com/grpc-ecosystem/go-grpc-prometheus 6b7015e65d366bf3f19b2b2a000a831940f0f7e0 +github.com/docker/swarmkit f35d9100f2c6ac810cc8d7de6e8f93dcc7a42d29 # bump_v19.03 branch +github.com/gogo/protobuf ba06b47c162d49f2af050fb4c75bcbc86a159d5c # v1.2.1 +github.com/golang/protobuf aa810b61a9c79d51363740d207bb46cf8e620ed5 # v1.2.0 +github.com/cloudflare/cfssl 5d63dbd981b5c408effbb58c442d54761ff94fbd # 1.3.2 +github.com/fernet/fernet-go 1b2437bc582b3cfbb341ee5a29f8ef5b42912ff2 +github.com/google/certificate-transparency-go 37a384cd035e722ea46e55029093e26687138edf # v1.0.20 +golang.org/x/crypto 88737f569e3a9c7ab309cdc09a07fe7fc87233c3 +golang.org/x/time fbb02b2291d28baffd63558aa44b4b56f178d650 +github.com/hashicorp/go-memdb cb9a474f84cc5e41b273b20c6927680b2a8776ad +github.com/hashicorp/go-immutable-radix 826af9ccf0feeee615d546d69b11f8e98da8c8f1 git://github.com/tonistiigi/go-immutable-radix.git +github.com/hashicorp/golang-lru 7087cb70de9f7a8bc0a10c375cb0d2280a8edf9c # v0.5.1 +github.com/coreos/pkg 3ac0863d7acf3bc44daf49afef8919af12f704ef # v3 +code.cloudfoundry.org/clock 02e53af36e6c978af692887ed449b74026d76fec + +# prometheus +github.com/prometheus/client_golang c5b7fccd204277076155f10851dad72b76a49317 # v0.8.0 +github.com/beorn7/perks e7f67b54abbeac9c40a31de0f81159e4cafebd6a +github.com/prometheus/client_model 6f3806018612930941127f2a7c6c453ba2c527d2 +github.com/prometheus/common 7600349dcfe1abd18d72d3a1770870d9800a7801 +github.com/prometheus/procfs 7d6f385de8bea29190f15ba9931442a0eaef9af7 
+github.com/matttproud/golang_protobuf_extensions c12348ce28de40eed0136aa2b644d0ee0650e56c # v1.0.1 +github.com/pkg/errors ba968bfe8b2f7e042a574c888954fccecfa385b4 # v0.8.1 +github.com/grpc-ecosystem/go-grpc-prometheus c225b8c3b01faf2899099b768856a9e916e5087b # v1.2.0 # cli -github.com/spf13/cobra v0.0.3 -github.com/spf13/pflag v1.0.1 -github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75 -github.com/Nvveen/Gotty a8b993ba6abdb0e0c12b0125c603323a71c7790c https://github.com/ijc25/Gotty +github.com/spf13/cobra ef82de70bb3f60c65fb8eebacbb2d122ef517385 # v0.0.3 +github.com/spf13/pflag 583c0c0531f06d5278b7d917446061adc344b5cd # v1.0.1 +github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75 # v1.0.0 +github.com/morikuni/aec 39771216ff4c63d11f5e604076f9c45e8be1067b # metrics -github.com/docker/go-metrics d466d4f6fd960e01820085bd7e1a24426ee7ef18 +github.com/docker/go-metrics d466d4f6fd960e01820085bd7e1a24426ee7ef18 -github.com/opencontainers/selinux b29023b86e4a69d1b46b7e7b4e2b6fda03f0b9cd +github.com/opencontainers/selinux 3a1f366feb7aecbf7a0e71ac4cea88b31597de9e # v1.2.2 - -# archive/tar (for Go 1.10, see https://github.com/golang/go/issues/24787) -# mkdir -p ./vendor/archive -# git clone -b go-1.10 --depth=1 git@github.com:kolyshkin/go-tar.git ./vendor/archive/tar -# vndr # to clean up test files +# DO NOT EDIT BELOW THIS LINE -------- reserved for downstream projects -------- diff --git a/src/cmd/linuxkit/vendor/github.com/docker/libtrust/LICENSE b/src/cmd/linuxkit/vendor/github.com/docker/libtrust/LICENSE new file mode 100644 index 000000000..27448585a --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/libtrust/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2014 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/src/cmd/linuxkit/vendor/github.com/docker/libtrust/README.md b/src/cmd/linuxkit/vendor/github.com/docker/libtrust/README.md new file mode 100644 index 000000000..dcffb31ae --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/libtrust/README.md @@ -0,0 +1,22 @@ +# libtrust + +> **WARNING** this library is no longer actively developed, and will be integrated +> into the [docker/distribution](https://www.github.com/docker/distribution) +> repository in the future. + +Libtrust is a library for managing authentication and authorization using public key cryptography. + +Authentication is handled using the identity attached to the public key. +Libtrust provides multiple methods to prove possession of the private key associated with an identity. + - TLS x509 certificates + - Signature verification + - Key Challenge + +Authorization and access control are managed through a distributed trust graph. +Trust servers are used as the authorities of the trust graph and allow caching portions of the graph for faster access. + +## Copyright and license + +Code and documentation copyright 2014 Docker, Inc. Code released under the Apache 2.0 license. +Docs released under Creative Commons.
+ diff --git a/src/cmd/linuxkit/vendor/github.com/docker/libtrust/certificates.go b/src/cmd/linuxkit/vendor/github.com/docker/libtrust/certificates.go new file mode 100644 index 000000000..3dcca33cb --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/libtrust/certificates.go @@ -0,0 +1,175 @@ +package libtrust + +import ( + "crypto/rand" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "fmt" + "io/ioutil" + "math/big" + "net" + "time" +) + +type certTemplateInfo struct { + commonName string + domains []string + ipAddresses []net.IP + isCA bool + clientAuth bool + serverAuth bool +} + +func generateCertTemplate(info *certTemplateInfo) *x509.Certificate { + // Generate a certificate template which is valid from the past week to + // 10 years from now. The usage of the certificate depends on the + // specified fields in the given certTemplateInfo object. + var ( + keyUsage x509.KeyUsage + extKeyUsage []x509.ExtKeyUsage + ) + + if info.isCA { + keyUsage = x509.KeyUsageCertSign + } + + if info.clientAuth { + extKeyUsage = append(extKeyUsage, x509.ExtKeyUsageClientAuth) + } + + if info.serverAuth { + extKeyUsage = append(extKeyUsage, x509.ExtKeyUsageServerAuth) + } + + return &x509.Certificate{ + SerialNumber: big.NewInt(0), + Subject: pkix.Name{ + CommonName: info.commonName, + }, + NotBefore: time.Now().Add(-time.Hour * 24 * 7), + NotAfter: time.Now().Add(time.Hour * 24 * 365 * 10), + DNSNames: info.domains, + IPAddresses: info.ipAddresses, + IsCA: info.isCA, + KeyUsage: keyUsage, + ExtKeyUsage: extKeyUsage, + BasicConstraintsValid: info.isCA, + } +} + +func generateCert(pub PublicKey, priv PrivateKey, subInfo, issInfo *certTemplateInfo) (cert *x509.Certificate, err error) { + pubCertTemplate := generateCertTemplate(subInfo) + privCertTemplate := generateCertTemplate(issInfo) + + certDER, err := x509.CreateCertificate( + rand.Reader, pubCertTemplate, privCertTemplate, + pub.CryptoPublicKey(), priv.CryptoPrivateKey(), + ) + if err != nil { + return nil, fmt.Errorf("failed to create certificate: %s", err) + } + + cert, err = x509.ParseCertificate(certDER) + if err != nil { + return nil, fmt.Errorf("failed to parse certificate: %s", err) + } + + return +} + +// GenerateSelfSignedServerCert creates a self-signed certificate for the +// given key which is to be used for TLS servers with the given domains and +// IP addresses. +func GenerateSelfSignedServerCert(key PrivateKey, domains []string, ipAddresses []net.IP) (*x509.Certificate, error) { + info := &certTemplateInfo{ + commonName: key.KeyID(), + domains: domains, + ipAddresses: ipAddresses, + serverAuth: true, + } + + return generateCert(key.PublicKey(), key, info, info) +} + +// GenerateSelfSignedClientCert creates a self-signed certificate for the +// given key which is to be used for TLS clients. +func GenerateSelfSignedClientCert(key PrivateKey) (*x509.Certificate, error) { + info := &certTemplateInfo{ + commonName: key.KeyID(), + clientAuth: true, + } + + return generateCert(key.PublicKey(), key, info, info) +} + +// GenerateCACert creates a certificate which can be used as a trusted +// certificate authority. +func GenerateCACert(signer PrivateKey, trustedKey PublicKey) (*x509.Certificate, error) { + subjectInfo := &certTemplateInfo{ + commonName: trustedKey.KeyID(), + isCA: true, + } + issuerInfo := &certTemplateInfo{ + commonName: signer.KeyID(), + } + + return generateCert(trustedKey, signer, subjectInfo, issuerInfo) +} + +// GenerateCACertPool creates a certificate authority pool to be used for a +// TLS configuration.
Any self-signed certificates issued by the specified +// trusted keys will be verified during a TLS handshake +func GenerateCACertPool(signer PrivateKey, trustedKeys []PublicKey) (*x509.CertPool, error) { + certPool := x509.NewCertPool() + + for _, trustedKey := range trustedKeys { + cert, err := GenerateCACert(signer, trustedKey) + if err != nil { + return nil, fmt.Errorf("failed to generate CA certificate: %s", err) + } + + certPool.AddCert(cert) + } + + return certPool, nil +} + +// LoadCertificateBundle loads certificates from the given file. The file should be pem encoded +// containing one or more certificates. The expected pem type is "CERTIFICATE". +func LoadCertificateBundle(filename string) ([]*x509.Certificate, error) { + b, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + certificates := []*x509.Certificate{} + var block *pem.Block + block, b = pem.Decode(b) + for ; block != nil; block, b = pem.Decode(b) { + if block.Type == "CERTIFICATE" { + cert, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return nil, err + } + certificates = append(certificates, cert) + } else { + return nil, fmt.Errorf("invalid pem block type: %s", block.Type) + } + } + + return certificates, nil +} + +// LoadCertificatePool loads a CA pool from the given file. The file should be pem encoded +// containing one or more certificates. The expected pem type is "CERTIFICATE". +func LoadCertificatePool(filename string) (*x509.CertPool, error) { + certs, err := LoadCertificateBundle(filename) + if err != nil { + return nil, err + } + pool := x509.NewCertPool() + for _, cert := range certs { + pool.AddCert(cert) + } + return pool, nil +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/libtrust/doc.go b/src/cmd/linuxkit/vendor/github.com/docker/libtrust/doc.go new file mode 100644 index 000000000..ec5d2159c --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/libtrust/doc.go @@ -0,0 +1,9 @@ +/* +Package libtrust provides an interface for managing authentication and +authorization using public key cryptography. Authentication is handled +using the identity attached to the public key and verified through TLS +x509 certificates, a key challenge, or signature. Authorization and +access control is managed through a trust graph distributed between +both remote trust servers and locally cached and managed data. +*/ +package libtrust diff --git a/src/cmd/linuxkit/vendor/github.com/docker/libtrust/ec_key.go b/src/cmd/linuxkit/vendor/github.com/docker/libtrust/ec_key.go new file mode 100644 index 000000000..00bbe4b3c --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/libtrust/ec_key.go @@ -0,0 +1,428 @@ +package libtrust + +import ( + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/x509" + "encoding/json" + "encoding/pem" + "errors" + "fmt" + "io" + "math/big" +) + +/* + * EC DSA PUBLIC KEY + */ + +// ecPublicKey implements a libtrust.PublicKey using elliptic curve digital +// signature algorithms. 
+type ecPublicKey struct { + *ecdsa.PublicKey + curveName string + signatureAlgorithm *signatureAlgorithm + extended map[string]interface{} +} + +func fromECPublicKey(cryptoPublicKey *ecdsa.PublicKey) (*ecPublicKey, error) { + curve := cryptoPublicKey.Curve + + switch { + case curve == elliptic.P256(): + return &ecPublicKey{cryptoPublicKey, "P-256", es256, map[string]interface{}{}}, nil + case curve == elliptic.P384(): + return &ecPublicKey{cryptoPublicKey, "P-384", es384, map[string]interface{}{}}, nil + case curve == elliptic.P521(): + return &ecPublicKey{cryptoPublicKey, "P-521", es512, map[string]interface{}{}}, nil + default: + return nil, errors.New("unsupported elliptic curve") + } +} + +// KeyType returns the key type for elliptic curve keys, i.e., "EC". +func (k *ecPublicKey) KeyType() string { + return "EC" +} + +// CurveName returns the elliptic curve identifier. +// Possible values are "P-256", "P-384", and "P-521". +func (k *ecPublicKey) CurveName() string { + return k.curveName +} + +// KeyID returns a distinct identifier which is unique to this Public Key. +func (k *ecPublicKey) KeyID() string { + return keyIDFromCryptoKey(k) +} + +func (k *ecPublicKey) String() string { + return fmt.Sprintf("EC Public Key <%s>", k.KeyID()) +} + +// Verify verifies the signature of the data in the io.Reader using this +// PublicKey. The alg parameter should identify the digital signature +// algorithm which was used to produce the signature and should be supported +// by this public key. Returns a nil error if the signature is valid. +func (k *ecPublicKey) Verify(data io.Reader, alg string, signature []byte) error { + // For EC keys there is only one supported signature algorithm depending + // on the curve parameters. + if k.signatureAlgorithm.HeaderParam() != alg { + return fmt.Errorf("unable to verify signature: EC Public Key with curve %q does not support signature algorithm %q", k.curveName, alg) + } + + // signature is the concatenation of (r, s), base64Url encoded. + sigLength := len(signature) + expectedOctetLength := 2 * ((k.Params().BitSize + 7) >> 3) + if sigLength != expectedOctetLength { + return fmt.Errorf("signature length is %d octets long, should be %d", sigLength, expectedOctetLength) + } + + rBytes, sBytes := signature[:sigLength/2], signature[sigLength/2:] + r := new(big.Int).SetBytes(rBytes) + s := new(big.Int).SetBytes(sBytes) + + hasher := k.signatureAlgorithm.HashID().New() + _, err := io.Copy(hasher, data) + if err != nil { + return fmt.Errorf("error reading data to sign: %s", err) + } + hash := hasher.Sum(nil) + + if !ecdsa.Verify(k.PublicKey, hash, r, s) { + return errors.New("invalid signature") + } + + return nil +} + +// CryptoPublicKey returns the internal object which can be used as a +// crypto.PublicKey for use with other standard library operations. The type +// is either *rsa.PublicKey or *ecdsa.PublicKey +func (k *ecPublicKey) CryptoPublicKey() crypto.PublicKey { + return k.PublicKey +} + +func (k *ecPublicKey) toMap() map[string]interface{} { + jwk := make(map[string]interface{}) + for k, v := range k.extended { + jwk[k] = v + } + jwk["kty"] = k.KeyType() + jwk["kid"] = k.KeyID() + jwk["crv"] = k.CurveName() + + xBytes := k.X.Bytes() + yBytes := k.Y.Bytes() + octetLength := (k.Params().BitSize + 7) >> 3 + // MUST include leading zeros in the output so that x, y are each + // *octetLength* bytes long. + xBuf := make([]byte, octetLength-len(xBytes), octetLength) + yBuf := make([]byte, octetLength-len(yBytes), octetLength) + xBuf = append(xBuf, xBytes...)
+ yBuf = append(yBuf, yBytes...) + + jwk["x"] = joseBase64UrlEncode(xBuf) + jwk["y"] = joseBase64UrlEncode(yBuf) + + return jwk +} + +// MarshalJSON serializes this Public Key using the JWK JSON serialization format for +// elliptic curve keys. +func (k *ecPublicKey) MarshalJSON() (data []byte, err error) { + return json.Marshal(k.toMap()) +} + +// PEMBlock serializes this Public Key to DER-encoded PKIX format. +func (k *ecPublicKey) PEMBlock() (*pem.Block, error) { + derBytes, err := x509.MarshalPKIXPublicKey(k.PublicKey) + if err != nil { + return nil, fmt.Errorf("unable to serialize EC PublicKey to DER-encoded PKIX format: %s", err) + } + k.extended["kid"] = k.KeyID() // For display purposes. + return createPemBlock("PUBLIC KEY", derBytes, k.extended) +} + +func (k *ecPublicKey) AddExtendedField(field string, value interface{}) { + k.extended[field] = value +} + +func (k *ecPublicKey) GetExtendedField(field string) interface{} { + v, ok := k.extended[field] + if !ok { + return nil + } + return v +} + +func ecPublicKeyFromMap(jwk map[string]interface{}) (*ecPublicKey, error) { + // JWK key type (kty) has already been determined to be "EC". + // Need to extract 'crv', 'x', 'y', and 'kid' and check for + // consistency. + + // Get the curve identifier value. + crv, err := stringFromMap(jwk, "crv") + if err != nil { + return nil, fmt.Errorf("JWK EC Public Key curve identifier: %s", err) + } + + var ( + curve elliptic.Curve + sigAlg *signatureAlgorithm + ) + + switch { + case crv == "P-256": + curve = elliptic.P256() + sigAlg = es256 + case crv == "P-384": + curve = elliptic.P384() + sigAlg = es384 + case crv == "P-521": + curve = elliptic.P521() + sigAlg = es512 + default: + return nil, fmt.Errorf("JWK EC Public Key curve identifier not supported: %q\n", crv) + } + + // Get the X and Y coordinates for the public key point. + xB64Url, err := stringFromMap(jwk, "x") + if err != nil { + return nil, fmt.Errorf("JWK EC Public Key x-coordinate: %s", err) + } + x, err := parseECCoordinate(xB64Url, curve) + if err != nil { + return nil, fmt.Errorf("JWK EC Public Key x-coordinate: %s", err) + } + + yB64Url, err := stringFromMap(jwk, "y") + if err != nil { + return nil, fmt.Errorf("JWK EC Public Key y-coordinate: %s", err) + } + y, err := parseECCoordinate(yB64Url, curve) + if err != nil { + return nil, fmt.Errorf("JWK EC Public Key y-coordinate: %s", err) + } + + key := &ecPublicKey{ + PublicKey: &ecdsa.PublicKey{Curve: curve, X: x, Y: y}, + curveName: crv, signatureAlgorithm: sigAlg, + } + + // Key ID is optional too, but if it exists, it should match the key. + _, ok := jwk["kid"] + if ok { + kid, err := stringFromMap(jwk, "kid") + if err != nil { + return nil, fmt.Errorf("JWK EC Public Key ID: %s", err) + } + if kid != key.KeyID() { + return nil, fmt.Errorf("JWK EC Public Key ID does not match: %s", kid) + } + } + + key.extended = jwk + + return key, nil +} + +/* + * EC DSA PRIVATE KEY + */ + +// ecPrivateKey implements a JWK Private Key using elliptic curve digital signature +// algorithms. +type ecPrivateKey struct { + ecPublicKey + *ecdsa.PrivateKey +} + +func fromECPrivateKey(cryptoPrivateKey *ecdsa.PrivateKey) (*ecPrivateKey, error) { + publicKey, err := fromECPublicKey(&cryptoPrivateKey.PublicKey) + if err != nil { + return nil, err + } + + return &ecPrivateKey{*publicKey, cryptoPrivateKey}, nil +} + +// PublicKey returns the Public Key data associated with this Private Key. 
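+//
+// Illustrative sketch (editor's note, not upstream documentation), showing
+// how the public half is typically obtained for sharing or verification:
+//
+//	priv, err := GenerateECP256PrivateKey() // generate a fresh P-256 key
+//	if err != nil {
+//		// handle error
+//	}
+//	pub := priv.PublicKey() // safe to serialize and distribute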
+func (k *ecPrivateKey) PublicKey() PublicKey {
+ return &k.ecPublicKey
+}
+
+func (k *ecPrivateKey) String() string {
+ return fmt.Sprintf("EC Private Key <%s>", k.KeyID())
+}
+
+// Sign signs the data read from the io.Reader using a signature algorithm supported
+// by the elliptic curve private key. If the specified hashing algorithm is
+// supported by this key, that hash function is used to generate the signature;
+// otherwise the default hashing algorithm for this key is used. Returns
+// the signature and the name of the JWK signature algorithm used, e.g.,
+// "ES256", "ES384", "ES512".
+func (k *ecPrivateKey) Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error) {
+ // Generate a signature of the data using the internal alg.
+ // The given hashID is only a suggestion, and since EC keys only support
+ // one signature/hash algorithm given the curve name, we disregard it for
+ // the elliptic curve JWK signature implementation.
+ hasher := k.signatureAlgorithm.HashID().New()
+ _, err = io.Copy(hasher, data)
+ if err != nil {
+ return nil, "", fmt.Errorf("error reading data to sign: %s", err)
+ }
+ hash := hasher.Sum(nil)
+
+ r, s, err := ecdsa.Sign(rand.Reader, k.PrivateKey, hash)
+ if err != nil {
+ return nil, "", fmt.Errorf("error producing signature: %s", err)
+ }
+ rBytes, sBytes := r.Bytes(), s.Bytes()
+ octetLength := (k.ecPublicKey.Params().BitSize + 7) >> 3
+ // MUST include leading zeros in the output
+ rBuf := make([]byte, octetLength-len(rBytes), octetLength)
+ sBuf := make([]byte, octetLength-len(sBytes), octetLength)
+
+ rBuf = append(rBuf, rBytes...)
+ sBuf = append(sBuf, sBytes...)
+
+ signature = append(rBuf, sBuf...)
+ alg = k.signatureAlgorithm.HeaderParam()
+
+ return
+}
+
+// CryptoPrivateKey returns the internal object which can be used as a
+// crypto.PrivateKey for use with other standard library operations. The type
+// is *ecdsa.PrivateKey for this key type.
+func (k *ecPrivateKey) CryptoPrivateKey() crypto.PrivateKey {
+ return k.PrivateKey
+}
+
+func (k *ecPrivateKey) toMap() map[string]interface{} {
+ jwk := k.ecPublicKey.toMap()
+
+ dBytes := k.D.Bytes()
+ // The length of this octet string MUST be ceiling(log-base-2(n)/8)
+ // octets (where n is the order of the curve). This is because the private
+ // key d must be in the interval [1, n-1] so the bitlength of d should be
+ // no larger than the bitlength of n-1. The easiest way to find the octet
+ // length is to take bitlength(n-1), add 7 to force a carry, and shift this
+ // bit sequence right by 3, which is essentially dividing by 8 and adding
+ // 1 if there is any remainder. Thus, the private key value d should be
+ // output to (bitlength(n-1)+7)>>3 octets.
+ n := k.ecPublicKey.Params().N
+ octetLength := (new(big.Int).Sub(n, big.NewInt(1)).BitLen() + 7) >> 3
+ // Create a buffer with the necessary zero-padding.
+ dBuf := make([]byte, octetLength-len(dBytes), octetLength)
+ dBuf = append(dBuf, dBytes...)
+
+ jwk["d"] = joseBase64UrlEncode(dBuf)
+
+ return jwk
+}
+
+// MarshalJSON serializes this Private Key using the JWK JSON serialization format for
+// elliptic curve keys.
+func (k *ecPrivateKey) MarshalJSON() (data []byte, err error) {
+ return json.Marshal(k.toMap())
+}
+
+// PEMBlock serializes this Private Key to DER-encoded SEC 1 format.
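+//
+// Sketch of typical use (illustrative only; the file name is a hypothetical
+// example, and io/ioutil is assumed to be imported by the caller):
+//
+//	block, err := key.PEMBlock()
+//	if err == nil {
+//		_ = ioutil.WriteFile("trust-key.pem", pem.EncodeToMemory(block), 0600)
+//	}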
+func (k *ecPrivateKey) PEMBlock() (*pem.Block, error) {
+ derBytes, err := x509.MarshalECPrivateKey(k.PrivateKey)
+ if err != nil {
+ return nil, fmt.Errorf("unable to serialize EC PrivateKey to DER-encoded PKIX format: %s", err)
+ }
+ k.extended["keyID"] = k.KeyID() // For display purposes.
+ return createPemBlock("EC PRIVATE KEY", derBytes, k.extended)
+}
+
+func ecPrivateKeyFromMap(jwk map[string]interface{}) (*ecPrivateKey, error) {
+ dB64Url, err := stringFromMap(jwk, "d")
+ if err != nil {
+ return nil, fmt.Errorf("JWK EC Private Key: %s", err)
+ }
+
+ // JWK key type (kty) has already been determined to be "EC".
+ // Need to extract the public key information, then extract the private
+ // key value 'd'.
+ publicKey, err := ecPublicKeyFromMap(jwk)
+ if err != nil {
+ return nil, err
+ }
+
+ d, err := parseECPrivateParam(dB64Url, publicKey.Curve)
+ if err != nil {
+ return nil, fmt.Errorf("JWK EC Private Key d-param: %s", err)
+ }
+
+ key := &ecPrivateKey{
+ ecPublicKey: *publicKey,
+ PrivateKey: &ecdsa.PrivateKey{
+ PublicKey: *publicKey.PublicKey,
+ D: d,
+ },
+ }
+
+ return key, nil
+}
+
+/*
+ * Key Generation Functions.
+ */
+
+func generateECPrivateKey(curve elliptic.Curve) (k *ecPrivateKey, err error) {
+ k = new(ecPrivateKey)
+ k.PrivateKey, err = ecdsa.GenerateKey(curve, rand.Reader)
+ if err != nil {
+ return nil, err
+ }
+
+ k.ecPublicKey.PublicKey = &k.PrivateKey.PublicKey
+ k.extended = make(map[string]interface{})
+
+ return
+}
+
+// GenerateECP256PrivateKey generates a key pair using elliptic curve P-256.
+func GenerateECP256PrivateKey() (PrivateKey, error) {
+ k, err := generateECPrivateKey(elliptic.P256())
+ if err != nil {
+ return nil, fmt.Errorf("error generating EC P-256 key: %s", err)
+ }
+
+ k.curveName = "P-256"
+ k.signatureAlgorithm = es256
+
+ return k, nil
+}
+
+// GenerateECP384PrivateKey generates a key pair using elliptic curve P-384.
+func GenerateECP384PrivateKey() (PrivateKey, error) {
+ k, err := generateECPrivateKey(elliptic.P384())
+ if err != nil {
+ return nil, fmt.Errorf("error generating EC P-384 key: %s", err)
+ }
+
+ k.curveName = "P-384"
+ k.signatureAlgorithm = es384
+
+ return k, nil
+}
+
+// GenerateECP521PrivateKey generates a key pair using elliptic curve P-521.
+func GenerateECP521PrivateKey() (PrivateKey, error) {
+ k, err := generateECPrivateKey(elliptic.P521())
+ if err != nil {
+ return nil, fmt.Errorf("error generating EC P-521 key: %s", err)
+ }
+
+ k.curveName = "P-521"
+ k.signatureAlgorithm = es512
+
+ return k, nil
+}
diff --git a/src/cmd/linuxkit/vendor/github.com/docker/libtrust/filter.go b/src/cmd/linuxkit/vendor/github.com/docker/libtrust/filter.go
new file mode 100644
index 000000000..5b2b4fca6
--- /dev/null
+++ b/src/cmd/linuxkit/vendor/github.com/docker/libtrust/filter.go
@@ -0,0 +1,50 @@
+package libtrust
+
+import (
+ "path/filepath"
+)
+
+// FilterByHosts filters the list of PublicKeys to only those which contain a
+// 'hosts' pattern which matches the given host. If *includeEmpty* is true,
+// then keys which do not specify any hosts are also returned.
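+//
+// Illustrative sketch (editor's addition): keep only the keys authorized for
+// a given registry host, where 'hosts' entries are filepath.Match patterns
+// such as "*.example.com" (the host name here is a hypothetical example):
+//
+//	authorized, err := FilterByHosts(keys, "registry.example.com", true)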
+func FilterByHosts(keys []PublicKey, host string, includeEmpty bool) ([]PublicKey, error) {
+ filtered := make([]PublicKey, 0, len(keys))
+
+ for _, pubKey := range keys {
+ var hosts []string
+ switch v := pubKey.GetExtendedField("hosts").(type) {
+ case []string:
+ hosts = v
+ case []interface{}:
+ for _, value := range v {
+ h, ok := value.(string)
+ if !ok {
+ continue
+ }
+ hosts = append(hosts, h)
+ }
+ }
+
+ if len(hosts) == 0 {
+ if includeEmpty {
+ filtered = append(filtered, pubKey)
+ }
+ continue
+ }
+
+ // Check if any hosts match pattern; stop after the first match so a
+ // key is never appended more than once.
+ for _, hostPattern := range hosts {
+ match, err := filepath.Match(hostPattern, host)
+ if err != nil {
+ return nil, err
+ }
+
+ if match {
+ filtered = append(filtered, pubKey)
+ break
+ }
+ }
+ }
+
+ return filtered, nil
+}
diff --git a/src/cmd/linuxkit/vendor/github.com/docker/libtrust/hash.go b/src/cmd/linuxkit/vendor/github.com/docker/libtrust/hash.go
new file mode 100644
index 000000000..a2df787dd
--- /dev/null
+++ b/src/cmd/linuxkit/vendor/github.com/docker/libtrust/hash.go
@@ -0,0 +1,56 @@
+package libtrust
+
+import (
+ "crypto"
+ _ "crypto/sha256" // Register SHA224 and SHA256
+ _ "crypto/sha512" // Register SHA384 and SHA512
+ "fmt"
+)
+
+type signatureAlgorithm struct {
+ algHeaderParam string
+ hashID crypto.Hash
+}
+
+func (h *signatureAlgorithm) HeaderParam() string {
+ return h.algHeaderParam
+}
+
+func (h *signatureAlgorithm) HashID() crypto.Hash {
+ return h.hashID
+}
+
+var (
+ rs256 = &signatureAlgorithm{"RS256", crypto.SHA256}
+ rs384 = &signatureAlgorithm{"RS384", crypto.SHA384}
+ rs512 = &signatureAlgorithm{"RS512", crypto.SHA512}
+ es256 = &signatureAlgorithm{"ES256", crypto.SHA256}
+ es384 = &signatureAlgorithm{"ES384", crypto.SHA384}
+ es512 = &signatureAlgorithm{"ES512", crypto.SHA512}
+)
+
+func rsaSignatureAlgorithmByName(alg string) (*signatureAlgorithm, error) {
+ switch {
+ case alg == "RS256":
+ return rs256, nil
+ case alg == "RS384":
+ return rs384, nil
+ case alg == "RS512":
+ return rs512, nil
+ default:
+ return nil, fmt.Errorf("RSA Digital Signature Algorithm %q not supported", alg)
+ }
+}
+
+func rsaPKCS1v15SignatureAlgorithmForHashID(hashID crypto.Hash) *signatureAlgorithm {
+ switch {
+ case hashID == crypto.SHA512:
+ return rs512
+ case hashID == crypto.SHA384:
+ return rs384
+ case hashID == crypto.SHA256:
+ fallthrough
+ default:
+ return rs256
+ }
+}
diff --git a/src/cmd/linuxkit/vendor/github.com/docker/libtrust/jsonsign.go b/src/cmd/linuxkit/vendor/github.com/docker/libtrust/jsonsign.go
new file mode 100644
index 000000000..cb2ca9a76
--- /dev/null
+++ b/src/cmd/linuxkit/vendor/github.com/docker/libtrust/jsonsign.go
@@ -0,0 +1,657 @@
+package libtrust
+
+import (
+ "bytes"
+ "crypto"
+ "crypto/x509"
+ "encoding/base64"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "sort"
+ "time"
+ "unicode"
+)
+
+var (
+ // ErrInvalidSignContent is used when the content to be signed is invalid.
+ ErrInvalidSignContent = errors.New("invalid sign content")
+
+ // ErrInvalidJSONContent is used when invalid json is encountered.
+ ErrInvalidJSONContent = errors.New("invalid json content")
+
+ // ErrMissingSignatureKey is used when the specified signature key
+ // does not exist in the JSON content.
+ ErrMissingSignatureKey = errors.New("missing signature key") +) + +type jsHeader struct { + JWK PublicKey `json:"jwk,omitempty"` + Algorithm string `json:"alg"` + Chain []string `json:"x5c,omitempty"` +} + +type jsSignature struct { + Header jsHeader `json:"header"` + Signature string `json:"signature"` + Protected string `json:"protected,omitempty"` +} + +type jsSignaturesSorted []jsSignature + +func (jsbkid jsSignaturesSorted) Swap(i, j int) { jsbkid[i], jsbkid[j] = jsbkid[j], jsbkid[i] } +func (jsbkid jsSignaturesSorted) Len() int { return len(jsbkid) } + +func (jsbkid jsSignaturesSorted) Less(i, j int) bool { + ki, kj := jsbkid[i].Header.JWK.KeyID(), jsbkid[j].Header.JWK.KeyID() + si, sj := jsbkid[i].Signature, jsbkid[j].Signature + + if ki == kj { + return si < sj + } + + return ki < kj +} + +type signKey struct { + PrivateKey + Chain []*x509.Certificate +} + +// JSONSignature represents a signature of a json object. +type JSONSignature struct { + payload string + signatures []jsSignature + indent string + formatLength int + formatTail []byte +} + +func newJSONSignature() *JSONSignature { + return &JSONSignature{ + signatures: make([]jsSignature, 0, 1), + } +} + +// Payload returns the encoded payload of the signature. This +// payload should not be signed directly +func (js *JSONSignature) Payload() ([]byte, error) { + return joseBase64UrlDecode(js.payload) +} + +func (js *JSONSignature) protectedHeader() (string, error) { + protected := map[string]interface{}{ + "formatLength": js.formatLength, + "formatTail": joseBase64UrlEncode(js.formatTail), + "time": time.Now().UTC().Format(time.RFC3339), + } + protectedBytes, err := json.Marshal(protected) + if err != nil { + return "", err + } + + return joseBase64UrlEncode(protectedBytes), nil +} + +func (js *JSONSignature) signBytes(protectedHeader string) ([]byte, error) { + buf := make([]byte, len(js.payload)+len(protectedHeader)+1) + copy(buf, protectedHeader) + buf[len(protectedHeader)] = '.' + copy(buf[len(protectedHeader)+1:], js.payload) + return buf, nil +} + +// Sign adds a signature using the given private key. +func (js *JSONSignature) Sign(key PrivateKey) error { + protected, err := js.protectedHeader() + if err != nil { + return err + } + signBytes, err := js.signBytes(protected) + if err != nil { + return err + } + sigBytes, algorithm, err := key.Sign(bytes.NewReader(signBytes), crypto.SHA256) + if err != nil { + return err + } + + js.signatures = append(js.signatures, jsSignature{ + Header: jsHeader{ + JWK: key.PublicKey(), + Algorithm: algorithm, + }, + Signature: joseBase64UrlEncode(sigBytes), + Protected: protected, + }) + + return nil +} + +// SignWithChain adds a signature using the given private key +// and setting the x509 chain. The public key of the first element +// in the chain must be the public key corresponding with the sign key. 
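+//
+// Sketch of intended use (illustrative; key and chain are assumed to be a
+// PrivateKey and its x509 certificate chain, with chain[0] matching key):
+//
+//	js, err := NewJSONSignature(content)
+//	if err == nil {
+//		err = js.SignWithChain(key, chain)
+//	}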
+func (js *JSONSignature) SignWithChain(key PrivateKey, chain []*x509.Certificate) error { + // Ensure key.Chain[0] is public key for key + //key.Chain.PublicKey + //key.PublicKey().CryptoPublicKey() + + // Verify chain + protected, err := js.protectedHeader() + if err != nil { + return err + } + signBytes, err := js.signBytes(protected) + if err != nil { + return err + } + sigBytes, algorithm, err := key.Sign(bytes.NewReader(signBytes), crypto.SHA256) + if err != nil { + return err + } + + header := jsHeader{ + Chain: make([]string, len(chain)), + Algorithm: algorithm, + } + + for i, cert := range chain { + header.Chain[i] = base64.StdEncoding.EncodeToString(cert.Raw) + } + + js.signatures = append(js.signatures, jsSignature{ + Header: header, + Signature: joseBase64UrlEncode(sigBytes), + Protected: protected, + }) + + return nil +} + +// Verify verifies all the signatures and returns the list of +// public keys used to sign. Any x509 chains are not checked. +func (js *JSONSignature) Verify() ([]PublicKey, error) { + keys := make([]PublicKey, len(js.signatures)) + for i, signature := range js.signatures { + signBytes, err := js.signBytes(signature.Protected) + if err != nil { + return nil, err + } + var publicKey PublicKey + if len(signature.Header.Chain) > 0 { + certBytes, err := base64.StdEncoding.DecodeString(signature.Header.Chain[0]) + if err != nil { + return nil, err + } + cert, err := x509.ParseCertificate(certBytes) + if err != nil { + return nil, err + } + publicKey, err = FromCryptoPublicKey(cert.PublicKey) + if err != nil { + return nil, err + } + } else if signature.Header.JWK != nil { + publicKey = signature.Header.JWK + } else { + return nil, errors.New("missing public key") + } + + sigBytes, err := joseBase64UrlDecode(signature.Signature) + if err != nil { + return nil, err + } + + err = publicKey.Verify(bytes.NewReader(signBytes), signature.Header.Algorithm, sigBytes) + if err != nil { + return nil, err + } + + keys[i] = publicKey + } + return keys, nil +} + +// VerifyChains verifies all the signatures and the chains associated +// with each signature and returns the list of verified chains. +// Signatures without an x509 chain are not checked. +func (js *JSONSignature) VerifyChains(ca *x509.CertPool) ([][]*x509.Certificate, error) { + chains := make([][]*x509.Certificate, 0, len(js.signatures)) + for _, signature := range js.signatures { + signBytes, err := js.signBytes(signature.Protected) + if err != nil { + return nil, err + } + var publicKey PublicKey + if len(signature.Header.Chain) > 0 { + certBytes, err := base64.StdEncoding.DecodeString(signature.Header.Chain[0]) + if err != nil { + return nil, err + } + cert, err := x509.ParseCertificate(certBytes) + if err != nil { + return nil, err + } + publicKey, err = FromCryptoPublicKey(cert.PublicKey) + if err != nil { + return nil, err + } + intermediates := x509.NewCertPool() + if len(signature.Header.Chain) > 1 { + intermediateChain := signature.Header.Chain[1:] + for i := range intermediateChain { + certBytes, err := base64.StdEncoding.DecodeString(intermediateChain[i]) + if err != nil { + return nil, err + } + intermediate, err := x509.ParseCertificate(certBytes) + if err != nil { + return nil, err + } + intermediates.AddCert(intermediate) + } + } + + verifyOptions := x509.VerifyOptions{ + Intermediates: intermediates, + Roots: ca, + } + + verifiedChains, err := cert.Verify(verifyOptions) + if err != nil { + return nil, err + } + chains = append(chains, verifiedChains...) 
+
+ sigBytes, err := joseBase64UrlDecode(signature.Signature)
+ if err != nil {
+ return nil, err
+ }
+
+ err = publicKey.Verify(bytes.NewReader(signBytes), signature.Header.Algorithm, sigBytes)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ }
+ return chains, nil
+}
+
+// JWS returns JSON serialized JWS according to
+// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-7.2
+func (js *JSONSignature) JWS() ([]byte, error) {
+ if len(js.signatures) == 0 {
+ return nil, errors.New("missing signature")
+ }
+
+ sort.Sort(jsSignaturesSorted(js.signatures))
+
+ jsonMap := map[string]interface{}{
+ "payload": js.payload,
+ "signatures": js.signatures,
+ }
+
+ return json.MarshalIndent(jsonMap, "", " ")
+}
+
+func notSpace(r rune) bool {
+ return !unicode.IsSpace(r)
+}
+
+func detectJSONIndent(jsonContent []byte) (indent string) {
+ if len(jsonContent) > 2 && jsonContent[0] == '{' && jsonContent[1] == '\n' {
+ quoteIndex := bytes.IndexRune(jsonContent[1:], '"')
+ if quoteIndex > 0 {
+ indent = string(jsonContent[2 : quoteIndex+1])
+ }
+ }
+ return
+}
+
+type jsParsedHeader struct {
+ JWK json.RawMessage `json:"jwk"`
+ Algorithm string `json:"alg"`
+ Chain []string `json:"x5c"`
+}
+
+type jsParsedSignature struct {
+ Header jsParsedHeader `json:"header"`
+ Signature string `json:"signature"`
+ Protected string `json:"protected"`
+}
+
+// ParseJWS parses a JWS serialized JSON object into a JSON Signature.
+func ParseJWS(content []byte) (*JSONSignature, error) {
+ type jsParsed struct {
+ Payload string `json:"payload"`
+ Signatures []jsParsedSignature `json:"signatures"`
+ }
+ parsed := &jsParsed{}
+ err := json.Unmarshal(content, parsed)
+ if err != nil {
+ return nil, err
+ }
+ if len(parsed.Signatures) == 0 {
+ return nil, errors.New("missing signatures")
+ }
+ payload, err := joseBase64UrlDecode(parsed.Payload)
+ if err != nil {
+ return nil, err
+ }
+
+ js, err := NewJSONSignature(payload)
+ if err != nil {
+ return nil, err
+ }
+ js.signatures = make([]jsSignature, len(parsed.Signatures))
+ for i, signature := range parsed.Signatures {
+ header := jsHeader{
+ Algorithm: signature.Header.Algorithm,
+ }
+ if signature.Header.Chain != nil {
+ header.Chain = signature.Header.Chain
+ }
+ if signature.Header.JWK != nil {
+ publicKey, err := UnmarshalPublicKeyJWK([]byte(signature.Header.JWK))
+ if err != nil {
+ return nil, err
+ }
+ header.JWK = publicKey
+ }
+ js.signatures[i] = jsSignature{
+ Header: header,
+ Signature: signature.Signature,
+ Protected: signature.Protected,
+ }
+ }
+
+ return js, nil
+}
+
+// NewJSONSignature returns a new unsigned JWS from a json byte array.
+// JSONSignature will need to be signed before serializing or storing.
+// Optionally, one or more signatures can be provided as byte buffers,
+// containing serialized JWS signatures, to assemble a fully signed JWS
+// package. It is the caller's responsibility to ensure uniqueness of the
+// provided signatures.
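+//
+// Illustrative sketch (editor's note): sign a raw JSON document and emit a
+// JWS envelope (privateKey is an assumed PrivateKey):
+//
+//	js, err := NewJSONSignature([]byte(`{"name":"value"}`))
+//	if err != nil {
+//		// handle error
+//	}
+//	if err := js.Sign(privateKey); err != nil {
+//		// handle error
+//	}
+//	jws, err := js.JWS() // serialized JWS JSON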
+func NewJSONSignature(content []byte, signatures ...[]byte) (*JSONSignature, error) { + var dataMap map[string]interface{} + err := json.Unmarshal(content, &dataMap) + if err != nil { + return nil, err + } + + js := newJSONSignature() + js.indent = detectJSONIndent(content) + + js.payload = joseBase64UrlEncode(content) + + // Find trailing } and whitespace, put in protected header + closeIndex := bytes.LastIndexFunc(content, notSpace) + if content[closeIndex] != '}' { + return nil, ErrInvalidJSONContent + } + lastRuneIndex := bytes.LastIndexFunc(content[:closeIndex], notSpace) + if content[lastRuneIndex] == ',' { + return nil, ErrInvalidJSONContent + } + js.formatLength = lastRuneIndex + 1 + js.formatTail = content[js.formatLength:] + + if len(signatures) > 0 { + for _, signature := range signatures { + var parsedJSig jsParsedSignature + + if err := json.Unmarshal(signature, &parsedJSig); err != nil { + return nil, err + } + + // TODO(stevvooe): A lot of the code below is repeated in + // ParseJWS. It will require more refactoring to fix that. + jsig := jsSignature{ + Header: jsHeader{ + Algorithm: parsedJSig.Header.Algorithm, + }, + Signature: parsedJSig.Signature, + Protected: parsedJSig.Protected, + } + + if parsedJSig.Header.Chain != nil { + jsig.Header.Chain = parsedJSig.Header.Chain + } + + if parsedJSig.Header.JWK != nil { + publicKey, err := UnmarshalPublicKeyJWK([]byte(parsedJSig.Header.JWK)) + if err != nil { + return nil, err + } + jsig.Header.JWK = publicKey + } + + js.signatures = append(js.signatures, jsig) + } + } + + return js, nil +} + +// NewJSONSignatureFromMap returns a new unsigned JSONSignature from a map or +// struct. JWS will need to be signed before serializing or storing. +func NewJSONSignatureFromMap(content interface{}) (*JSONSignature, error) { + switch content.(type) { + case map[string]interface{}: + case struct{}: + default: + return nil, errors.New("invalid data type") + } + + js := newJSONSignature() + js.indent = " " + + payload, err := json.MarshalIndent(content, "", js.indent) + if err != nil { + return nil, err + } + js.payload = joseBase64UrlEncode(payload) + + // Remove '\n}' from formatted section, put in protected header + js.formatLength = len(payload) - 2 + js.formatTail = payload[js.formatLength:] + + return js, nil +} + +func readIntFromMap(key string, m map[string]interface{}) (int, bool) { + value, ok := m[key] + if !ok { + return 0, false + } + switch v := value.(type) { + case int: + return v, true + case float64: + return int(v), true + default: + return 0, false + } +} + +func readStringFromMap(key string, m map[string]interface{}) (v string, ok bool) { + value, ok := m[key] + if !ok { + return "", false + } + v, ok = value.(string) + return +} + +// ParsePrettySignature parses a formatted signature into a +// JSON signature. If the signatures are missing the format information +// an error is thrown. The formatted signature must be created by +// the same method as format signature. 
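+//
+// Sketch (illustrative): reparse a document previously emitted by
+// PrettySignature under the same signature key ("signatures" is an
+// arbitrary example):
+//
+//	js, err := ParsePrettySignature(signedContent, "signatures")
+//	if err != nil {
+//		// handle error
+//	}
+//	keys, err := js.Verify() // public keys that produced valid signatures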
+func ParsePrettySignature(content []byte, signatureKey string) (*JSONSignature, error) { + var contentMap map[string]json.RawMessage + err := json.Unmarshal(content, &contentMap) + if err != nil { + return nil, fmt.Errorf("error unmarshalling content: %s", err) + } + sigMessage, ok := contentMap[signatureKey] + if !ok { + return nil, ErrMissingSignatureKey + } + + var signatureBlocks []jsParsedSignature + err = json.Unmarshal([]byte(sigMessage), &signatureBlocks) + if err != nil { + return nil, fmt.Errorf("error unmarshalling signatures: %s", err) + } + + js := newJSONSignature() + js.signatures = make([]jsSignature, len(signatureBlocks)) + + for i, signatureBlock := range signatureBlocks { + protectedBytes, err := joseBase64UrlDecode(signatureBlock.Protected) + if err != nil { + return nil, fmt.Errorf("base64 decode error: %s", err) + } + var protectedHeader map[string]interface{} + err = json.Unmarshal(protectedBytes, &protectedHeader) + if err != nil { + return nil, fmt.Errorf("error unmarshalling protected header: %s", err) + } + + formatLength, ok := readIntFromMap("formatLength", protectedHeader) + if !ok { + return nil, errors.New("missing formatted length") + } + encodedTail, ok := readStringFromMap("formatTail", protectedHeader) + if !ok { + return nil, errors.New("missing formatted tail") + } + formatTail, err := joseBase64UrlDecode(encodedTail) + if err != nil { + return nil, fmt.Errorf("base64 decode error on tail: %s", err) + } + if js.formatLength == 0 { + js.formatLength = formatLength + } else if js.formatLength != formatLength { + return nil, errors.New("conflicting format length") + } + if len(js.formatTail) == 0 { + js.formatTail = formatTail + } else if bytes.Compare(js.formatTail, formatTail) != 0 { + return nil, errors.New("conflicting format tail") + } + + header := jsHeader{ + Algorithm: signatureBlock.Header.Algorithm, + Chain: signatureBlock.Header.Chain, + } + if signatureBlock.Header.JWK != nil { + publicKey, err := UnmarshalPublicKeyJWK([]byte(signatureBlock.Header.JWK)) + if err != nil { + return nil, fmt.Errorf("error unmarshalling public key: %s", err) + } + header.JWK = publicKey + } + js.signatures[i] = jsSignature{ + Header: header, + Signature: signatureBlock.Signature, + Protected: signatureBlock.Protected, + } + } + if js.formatLength > len(content) { + return nil, errors.New("invalid format length") + } + formatted := make([]byte, js.formatLength+len(js.formatTail)) + copy(formatted, content[:js.formatLength]) + copy(formatted[js.formatLength:], js.formatTail) + js.indent = detectJSONIndent(formatted) + js.payload = joseBase64UrlEncode(formatted) + + return js, nil +} + +// PrettySignature formats a json signature into an easy to read +// single json serialized object. 
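+//
+// Illustrative sketch (editor's addition): embed the signatures into the
+// payload under a caller-chosen key, here the hypothetical "signatures":
+//
+//	signed, err := js.PrettySignature("signatures")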
+func (js *JSONSignature) PrettySignature(signatureKey string) ([]byte, error) {
+ if len(js.signatures) == 0 {
+ return nil, errors.New("no signatures")
+ }
+ payload, err := joseBase64UrlDecode(js.payload)
+ if err != nil {
+ return nil, err
+ }
+ payload = payload[:js.formatLength]
+
+ sort.Sort(jsSignaturesSorted(js.signatures))
+
+ var marshalled []byte
+ var marshallErr error
+ if js.indent != "" {
+ marshalled, marshallErr = json.MarshalIndent(js.signatures, js.indent, js.indent)
+ } else {
+ marshalled, marshallErr = json.Marshal(js.signatures)
+ }
+ if marshallErr != nil {
+ return nil, marshallErr
+ }
+
+ buf := bytes.NewBuffer(make([]byte, 0, len(payload)+len(marshalled)+34))
+ buf.Write(payload)
+ buf.WriteByte(',')
+ if js.indent != "" {
+ buf.WriteByte('\n')
+ buf.WriteString(js.indent)
+ buf.WriteByte('"')
+ buf.WriteString(signatureKey)
+ buf.WriteString("\": ")
+ buf.Write(marshalled)
+ buf.WriteByte('\n')
+ } else {
+ buf.WriteByte('"')
+ buf.WriteString(signatureKey)
+ buf.WriteString("\":")
+ buf.Write(marshalled)
+ }
+ buf.WriteByte('}')
+
+ return buf.Bytes(), nil
+}
+
+// Signatures provides the signatures on this JWS as opaque blobs, sorted by
+// keyID. These blobs can be stored and reassembled with payloads. Internally,
+// they are simply marshaled json web signatures but implementations should
+// not rely on this.
+func (js *JSONSignature) Signatures() ([][]byte, error) {
+ sort.Sort(jsSignaturesSorted(js.signatures))
+
+ var sb [][]byte
+ for _, jsig := range js.signatures {
+ p, err := json.Marshal(jsig)
+ if err != nil {
+ return nil, err
+ }
+
+ sb = append(sb, p)
+ }
+
+ return sb, nil
+}
+
+// Merge combines the signatures from one or more other signatures into the
+// method receiver. If the payloads differ for any argument, an error will be
+// returned and the receiver will not be modified.
+func (js *JSONSignature) Merge(others ...*JSONSignature) error {
+ merged := js.signatures
+ for _, other := range others {
+ if js.payload != other.payload {
+ return fmt.Errorf("payloads differ from merge target")
+ }
+ merged = append(merged, other.signatures...)
+ }
+
+ js.signatures = merged
+ return nil
+}
diff --git a/src/cmd/linuxkit/vendor/github.com/docker/libtrust/key.go b/src/cmd/linuxkit/vendor/github.com/docker/libtrust/key.go
new file mode 100644
index 000000000..73642db2a
--- /dev/null
+++ b/src/cmd/linuxkit/vendor/github.com/docker/libtrust/key.go
@@ -0,0 +1,253 @@
+package libtrust
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/rsa"
+ "crypto/x509"
+ "encoding/json"
+ "encoding/pem"
+ "errors"
+ "fmt"
+ "io"
+)
+
+// PublicKey is a generic interface for a Public Key.
+type PublicKey interface {
+ // KeyType returns the key type for this key. For elliptic curve keys,
+ // this value should be "EC". For RSA keys, this value should be "RSA".
+ KeyType() string
+ // KeyID returns a distinct identifier which is unique to this Public Key.
+ // The format generated by this library is a base32 encoding of a 240 bit
+ // hash of the public key data divided into 12 groups like so:
+ // ABCD:EFGH:IJKL:MNOP:QRST:UVWX:YZ23:4567:ABCD:EFGH:IJKL:MNOP
+ KeyID() string
+ // Verify verifies the signature of the data in the io.Reader using this
+ // Public Key. The alg parameter should identify the digital signature
+ // algorithm which was used to produce the signature and should be
+ // supported by this public key. Returns a nil error if the signature
+ // is valid.
+ Verify(data io.Reader, alg string, signature []byte) error
+ // CryptoPublicKey returns the internal object which can be used as a
+ // crypto.PublicKey for use with other standard library operations. The type
+ // is either *rsa.PublicKey or *ecdsa.PublicKey
+ CryptoPublicKey() crypto.PublicKey
+ // These public keys can be serialized to the standard JSON encoding for
+ // JSON Web Keys. See section 6 of the IETF draft RFC for JOSE JSON Web
+ // Algorithms.
+ MarshalJSON() ([]byte, error)
+ // These keys can also be serialized to the standard PEM encoding.
+ PEMBlock() (*pem.Block, error)
+ // The string representation of a key is its key type and ID.
+ String() string
+ AddExtendedField(string, interface{})
+ GetExtendedField(string) interface{}
+}
+
+// PrivateKey is a generic interface for a Private Key.
+type PrivateKey interface {
+ // A PrivateKey contains all fields and methods of a PublicKey of the
+ // same type. The MarshalJSON method also outputs the private key as a
+ // JSON Web Key, and the PEMBlock method outputs the private key as a
+ // PEM block.
+ PublicKey
+ // PublicKey returns the PublicKey associated with this PrivateKey.
+ PublicKey() PublicKey
+ // Sign signs the data read from the io.Reader using a signature algorithm
+ // supported by the private key. If the specified hashing algorithm is
+ // supported by this key, that hash function is used to generate the
+ // signature; otherwise the default hashing algorithm for this key is
+ // used. Returns the signature and identifier of the algorithm used.
+ Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error)
+ // CryptoPrivateKey returns the internal object which can be used as a
+ // crypto.PrivateKey for use with other standard library operations. The
+ // type is either *rsa.PrivateKey or *ecdsa.PrivateKey
+ CryptoPrivateKey() crypto.PrivateKey
+}
+
+// FromCryptoPublicKey returns a libtrust PublicKey representation of the given
+// *ecdsa.PublicKey or *rsa.PublicKey. Returns a non-nil error when the given
+// key is of an unsupported type.
+func FromCryptoPublicKey(cryptoPublicKey crypto.PublicKey) (PublicKey, error) {
+ switch cryptoPublicKey := cryptoPublicKey.(type) {
+ case *ecdsa.PublicKey:
+ return fromECPublicKey(cryptoPublicKey)
+ case *rsa.PublicKey:
+ return fromRSAPublicKey(cryptoPublicKey), nil
+ default:
+ return nil, fmt.Errorf("public key type %T is not supported", cryptoPublicKey)
+ }
+}
+
+// FromCryptoPrivateKey returns a libtrust PrivateKey representation of the given
+// *ecdsa.PrivateKey or *rsa.PrivateKey. Returns a non-nil error when the given
+// key is of an unsupported type.
+func FromCryptoPrivateKey(cryptoPrivateKey crypto.PrivateKey) (PrivateKey, error) {
+ switch cryptoPrivateKey := cryptoPrivateKey.(type) {
+ case *ecdsa.PrivateKey:
+ return fromECPrivateKey(cryptoPrivateKey)
+ case *rsa.PrivateKey:
+ return fromRSAPrivateKey(cryptoPrivateKey), nil
+ default:
+ return nil, fmt.Errorf("private key type %T is not supported", cryptoPrivateKey)
+ }
+}
+
+// UnmarshalPublicKeyPEM parses the PEM encoded data and returns a libtrust
+// PublicKey or an error if there is a problem with the encoding.
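+//
+// Sketch of typical use (illustrative; the file name is hypothetical and
+// io/ioutil is assumed to be imported by the caller):
+//
+//	pemData, err := ioutil.ReadFile("public-key.pem")
+//	if err != nil {
+//		// handle error
+//	}
+//	pubKey, err := UnmarshalPublicKeyPEM(pemData)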
+func UnmarshalPublicKeyPEM(data []byte) (PublicKey, error) { + pemBlock, _ := pem.Decode(data) + if pemBlock == nil { + return nil, errors.New("unable to find PEM encoded data") + } else if pemBlock.Type != "PUBLIC KEY" { + return nil, fmt.Errorf("unable to get PublicKey from PEM type: %s", pemBlock.Type) + } + + return pubKeyFromPEMBlock(pemBlock) +} + +// UnmarshalPublicKeyPEMBundle parses the PEM encoded data as a bundle of +// PEM blocks appended one after the other and returns a slice of PublicKey +// objects that it finds. +func UnmarshalPublicKeyPEMBundle(data []byte) ([]PublicKey, error) { + pubKeys := []PublicKey{} + + for { + var pemBlock *pem.Block + pemBlock, data = pem.Decode(data) + if pemBlock == nil { + break + } else if pemBlock.Type != "PUBLIC KEY" { + return nil, fmt.Errorf("unable to get PublicKey from PEM type: %s", pemBlock.Type) + } + + pubKey, err := pubKeyFromPEMBlock(pemBlock) + if err != nil { + return nil, err + } + + pubKeys = append(pubKeys, pubKey) + } + + return pubKeys, nil +} + +// UnmarshalPrivateKeyPEM parses the PEM encoded data and returns a libtrust +// PrivateKey or an error if there is a problem with the encoding. +func UnmarshalPrivateKeyPEM(data []byte) (PrivateKey, error) { + pemBlock, _ := pem.Decode(data) + if pemBlock == nil { + return nil, errors.New("unable to find PEM encoded data") + } + + var key PrivateKey + + switch { + case pemBlock.Type == "RSA PRIVATE KEY": + rsaPrivateKey, err := x509.ParsePKCS1PrivateKey(pemBlock.Bytes) + if err != nil { + return nil, fmt.Errorf("unable to decode RSA Private Key PEM data: %s", err) + } + key = fromRSAPrivateKey(rsaPrivateKey) + case pemBlock.Type == "EC PRIVATE KEY": + ecPrivateKey, err := x509.ParseECPrivateKey(pemBlock.Bytes) + if err != nil { + return nil, fmt.Errorf("unable to decode EC Private Key PEM data: %s", err) + } + key, err = fromECPrivateKey(ecPrivateKey) + if err != nil { + return nil, err + } + default: + return nil, fmt.Errorf("unable to get PrivateKey from PEM type: %s", pemBlock.Type) + } + + addPEMHeadersToKey(pemBlock, key.PublicKey()) + + return key, nil +} + +// UnmarshalPublicKeyJWK unmarshals the given JSON Web Key into a generic +// Public Key to be used with libtrust. +func UnmarshalPublicKeyJWK(data []byte) (PublicKey, error) { + jwk := make(map[string]interface{}) + + err := json.Unmarshal(data, &jwk) + if err != nil { + return nil, fmt.Errorf( + "decoding JWK Public Key JSON data: %s\n", err, + ) + } + + // Get the Key Type value. + kty, err := stringFromMap(jwk, "kty") + if err != nil { + return nil, fmt.Errorf("JWK Public Key type: %s", err) + } + + switch { + case kty == "EC": + // Call out to unmarshal EC public key. + return ecPublicKeyFromMap(jwk) + case kty == "RSA": + // Call out to unmarshal RSA public key. + return rsaPublicKeyFromMap(jwk) + default: + return nil, fmt.Errorf( + "JWK Public Key type not supported: %q\n", kty, + ) + } +} + +// UnmarshalPublicKeyJWKSet parses the JSON encoded data as a JSON Web Key Set +// and returns a slice of Public Key objects. 
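+//
+// Illustrative sketch (editor's note), assuming data holds a JSON object of
+// the form {"keys": [...]} as defined for JWK Sets:
+//
+//	pubKeys, err := UnmarshalPublicKeyJWKSet(data)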
+func UnmarshalPublicKeyJWKSet(data []byte) ([]PublicKey, error) { + rawKeys, err := loadJSONKeySetRaw(data) + if err != nil { + return nil, err + } + + pubKeys := make([]PublicKey, 0, len(rawKeys)) + + for _, rawKey := range rawKeys { + pubKey, err := UnmarshalPublicKeyJWK(rawKey) + if err != nil { + return nil, err + } + pubKeys = append(pubKeys, pubKey) + } + + return pubKeys, nil +} + +// UnmarshalPrivateKeyJWK unmarshals the given JSON Web Key into a generic +// Private Key to be used with libtrust. +func UnmarshalPrivateKeyJWK(data []byte) (PrivateKey, error) { + jwk := make(map[string]interface{}) + + err := json.Unmarshal(data, &jwk) + if err != nil { + return nil, fmt.Errorf( + "decoding JWK Private Key JSON data: %s\n", err, + ) + } + + // Get the Key Type value. + kty, err := stringFromMap(jwk, "kty") + if err != nil { + return nil, fmt.Errorf("JWK Private Key type: %s", err) + } + + switch { + case kty == "EC": + // Call out to unmarshal EC private key. + return ecPrivateKeyFromMap(jwk) + case kty == "RSA": + // Call out to unmarshal RSA private key. + return rsaPrivateKeyFromMap(jwk) + default: + return nil, fmt.Errorf( + "JWK Private Key type not supported: %q\n", kty, + ) + } +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/libtrust/key_files.go b/src/cmd/linuxkit/vendor/github.com/docker/libtrust/key_files.go new file mode 100644 index 000000000..c526de545 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/libtrust/key_files.go @@ -0,0 +1,255 @@ +package libtrust + +import ( + "encoding/json" + "encoding/pem" + "errors" + "fmt" + "io/ioutil" + "os" + "strings" +) + +var ( + // ErrKeyFileDoesNotExist indicates that the private key file does not exist. + ErrKeyFileDoesNotExist = errors.New("key file does not exist") +) + +func readKeyFileBytes(filename string) ([]byte, error) { + data, err := ioutil.ReadFile(filename) + if err != nil { + if os.IsNotExist(err) { + err = ErrKeyFileDoesNotExist + } else { + err = fmt.Errorf("unable to read key file %s: %s", filename, err) + } + + return nil, err + } + + return data, nil +} + +/* + Loading and Saving of Public and Private Keys in either PEM or JWK format. +*/ + +// LoadKeyFile opens the given filename and attempts to read a Private Key +// encoded in either PEM or JWK format (if .json or .jwk file extension). +func LoadKeyFile(filename string) (PrivateKey, error) { + contents, err := readKeyFileBytes(filename) + if err != nil { + return nil, err + } + + var key PrivateKey + + if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { + key, err = UnmarshalPrivateKeyJWK(contents) + if err != nil { + return nil, fmt.Errorf("unable to decode private key JWK: %s", err) + } + } else { + key, err = UnmarshalPrivateKeyPEM(contents) + if err != nil { + return nil, fmt.Errorf("unable to decode private key PEM: %s", err) + } + } + + return key, nil +} + +// LoadPublicKeyFile opens the given filename and attempts to read a Public Key +// encoded in either PEM or JWK format (if .json or .jwk file extension). 
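+//
+// Sketch (illustrative; the file name is a hypothetical example):
+//
+//	pubKey, err := LoadPublicKeyFile("id.json") // .json/.jwk parsed as JWK, anything else as PEM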
+func LoadPublicKeyFile(filename string) (PublicKey, error) { + contents, err := readKeyFileBytes(filename) + if err != nil { + return nil, err + } + + var key PublicKey + + if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { + key, err = UnmarshalPublicKeyJWK(contents) + if err != nil { + return nil, fmt.Errorf("unable to decode public key JWK: %s", err) + } + } else { + key, err = UnmarshalPublicKeyPEM(contents) + if err != nil { + return nil, fmt.Errorf("unable to decode public key PEM: %s", err) + } + } + + return key, nil +} + +// SaveKey saves the given key to a file using the provided filename. +// This process will overwrite any existing file at the provided location. +func SaveKey(filename string, key PrivateKey) error { + var encodedKey []byte + var err error + + if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { + // Encode in JSON Web Key format. + encodedKey, err = json.MarshalIndent(key, "", " ") + if err != nil { + return fmt.Errorf("unable to encode private key JWK: %s", err) + } + } else { + // Encode in PEM format. + pemBlock, err := key.PEMBlock() + if err != nil { + return fmt.Errorf("unable to encode private key PEM: %s", err) + } + encodedKey = pem.EncodeToMemory(pemBlock) + } + + err = ioutil.WriteFile(filename, encodedKey, os.FileMode(0600)) + if err != nil { + return fmt.Errorf("unable to write private key file %s: %s", filename, err) + } + + return nil +} + +// SavePublicKey saves the given public key to the file. +func SavePublicKey(filename string, key PublicKey) error { + var encodedKey []byte + var err error + + if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { + // Encode in JSON Web Key format. + encodedKey, err = json.MarshalIndent(key, "", " ") + if err != nil { + return fmt.Errorf("unable to encode public key JWK: %s", err) + } + } else { + // Encode in PEM format. + pemBlock, err := key.PEMBlock() + if err != nil { + return fmt.Errorf("unable to encode public key PEM: %s", err) + } + encodedKey = pem.EncodeToMemory(pemBlock) + } + + err = ioutil.WriteFile(filename, encodedKey, os.FileMode(0644)) + if err != nil { + return fmt.Errorf("unable to write public key file %s: %s", filename, err) + } + + return nil +} + +// Public Key Set files + +type jwkSet struct { + Keys []json.RawMessage `json:"keys"` +} + +// LoadKeySetFile loads a key set +func LoadKeySetFile(filename string) ([]PublicKey, error) { + if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { + return loadJSONKeySetFile(filename) + } + + // Must be a PEM format file + return loadPEMKeySetFile(filename) +} + +func loadJSONKeySetRaw(data []byte) ([]json.RawMessage, error) { + if len(data) == 0 { + // This is okay, just return an empty slice. 
+ return []json.RawMessage{}, nil
+ }
+
+ keySet := jwkSet{}
+
+ err := json.Unmarshal(data, &keySet)
+ if err != nil {
+ return nil, fmt.Errorf("unable to decode JSON Web Key Set: %s", err)
+ }
+
+ return keySet.Keys, nil
+}
+
+func loadJSONKeySetFile(filename string) ([]PublicKey, error) {
+ contents, err := readKeyFileBytes(filename)
+ if err != nil && err != ErrKeyFileDoesNotExist {
+ return nil, err
+ }
+
+ return UnmarshalPublicKeyJWKSet(contents)
+}
+
+func loadPEMKeySetFile(filename string) ([]PublicKey, error) {
+ data, err := readKeyFileBytes(filename)
+ if err != nil && err != ErrKeyFileDoesNotExist {
+ return nil, err
+ }
+
+ return UnmarshalPublicKeyPEMBundle(data)
+}
+
+// AddKeySetFile adds a key to a key set
+func AddKeySetFile(filename string, key PublicKey) error {
+ if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") {
+ return addKeySetJSONFile(filename, key)
+ }
+
+ // Must be a PEM format file
+ return addKeySetPEMFile(filename, key)
+}
+
+func addKeySetJSONFile(filename string, key PublicKey) error {
+ encodedKey, err := json.Marshal(key)
+ if err != nil {
+ return fmt.Errorf("unable to encode trusted client key: %s", err)
+ }
+
+ contents, err := readKeyFileBytes(filename)
+ if err != nil && err != ErrKeyFileDoesNotExist {
+ return err
+ }
+
+ rawEntries, err := loadJSONKeySetRaw(contents)
+ if err != nil {
+ return err
+ }
+
+ rawEntries = append(rawEntries, json.RawMessage(encodedKey))
+ entriesWrapper := jwkSet{Keys: rawEntries}
+
+ encodedEntries, err := json.MarshalIndent(entriesWrapper, "", " ")
+ if err != nil {
+ return fmt.Errorf("unable to encode trusted client keys: %s", err)
+ }
+
+ err = ioutil.WriteFile(filename, encodedEntries, os.FileMode(0644))
+ if err != nil {
+ return fmt.Errorf("unable to write trusted client keys file %s: %s", filename, err)
+ }
+
+ return nil
+}
+
+func addKeySetPEMFile(filename string, key PublicKey) error {
+ // Encode to PEM, open file for appending, write PEM.
+ file, err := os.OpenFile(filename, os.O_CREATE|os.O_APPEND|os.O_RDWR, os.FileMode(0644))
+ if err != nil {
+ return fmt.Errorf("unable to open trusted client keys file %s: %s", filename, err)
+ }
+ defer file.Close()
+
+ pemBlock, err := key.PEMBlock()
+ if err != nil {
+ return fmt.Errorf("unable to encode trusted key: %s", err)
+ }
+
+ _, err = file.Write(pem.EncodeToMemory(pemBlock))
+ if err != nil {
+ return fmt.Errorf("unable to write trusted keys file: %s", err)
+ }
+
+ return nil
+}
diff --git a/src/cmd/linuxkit/vendor/github.com/docker/libtrust/key_manager.go b/src/cmd/linuxkit/vendor/github.com/docker/libtrust/key_manager.go
new file mode 100644
index 000000000..9a98ae357
--- /dev/null
+++ b/src/cmd/linuxkit/vendor/github.com/docker/libtrust/key_manager.go
@@ -0,0 +1,175 @@
+package libtrust
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "fmt"
+ "io/ioutil"
+ "net"
+ "os"
+ "path"
+ "sync"
+)
+
+// ClientKeyManager manages client keys on the filesystem
+type ClientKeyManager struct {
+ key PrivateKey
+ clientFile string
+ clientDir string
+
+ clientLock sync.RWMutex
+ clients []PublicKey
+
+ configLock sync.Mutex
+ configs []*tls.Config
+}
+
+// NewClientKeyManager creates a new manager from a set of key files,
+// managed by the given private key.
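+//
+// Illustrative sketch (editor's addition; the paths are hypothetical and
+// serverTLSConfig is an assumed *tls.Config):
+//
+//	km, err := NewClientKeyManager(trustKey,
+//		"/etc/docker/authorized-keys.json",
+//		"/etc/docker/authorized-keys.d")
+//	if err != nil {
+//		// handle error
+//	}
+//	err = km.RegisterTLSConfig(serverTLSConfig)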
+func NewClientKeyManager(trustKey PrivateKey, clientFile, clientDir string) (*ClientKeyManager, error) { + m := &ClientKeyManager{ + key: trustKey, + clientFile: clientFile, + clientDir: clientDir, + } + if err := m.loadKeys(); err != nil { + return nil, err + } + // TODO Start watching file and directory + + return m, nil +} + +func (c *ClientKeyManager) loadKeys() (err error) { + // Load authorized keys file + var clients []PublicKey + if c.clientFile != "" { + clients, err = LoadKeySetFile(c.clientFile) + if err != nil { + return fmt.Errorf("unable to load authorized keys: %s", err) + } + } + + // Add clients from authorized keys directory + files, err := ioutil.ReadDir(c.clientDir) + if err != nil && !os.IsNotExist(err) { + return fmt.Errorf("unable to open authorized keys directory: %s", err) + } + for _, f := range files { + if !f.IsDir() { + publicKey, err := LoadPublicKeyFile(path.Join(c.clientDir, f.Name())) + if err != nil { + return fmt.Errorf("unable to load authorized key file: %s", err) + } + clients = append(clients, publicKey) + } + } + + c.clientLock.Lock() + c.clients = clients + c.clientLock.Unlock() + + return nil +} + +// RegisterTLSConfig registers a tls configuration to manager +// such that any changes to the keys may be reflected in +// the tls client CA pool +func (c *ClientKeyManager) RegisterTLSConfig(tlsConfig *tls.Config) error { + c.clientLock.RLock() + certPool, err := GenerateCACertPool(c.key, c.clients) + if err != nil { + return fmt.Errorf("CA pool generation error: %s", err) + } + c.clientLock.RUnlock() + + tlsConfig.ClientCAs = certPool + + c.configLock.Lock() + c.configs = append(c.configs, tlsConfig) + c.configLock.Unlock() + + return nil +} + +// NewIdentityAuthTLSConfig creates a tls.Config for the server to use for +// libtrust identity authentication for the domain specified +func NewIdentityAuthTLSConfig(trustKey PrivateKey, clients *ClientKeyManager, addr string, domain string) (*tls.Config, error) { + tlsConfig := newTLSConfig() + + tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert + if err := clients.RegisterTLSConfig(tlsConfig); err != nil { + return nil, err + } + + // Generate cert + ips, domains, err := parseAddr(addr) + if err != nil { + return nil, err + } + // add domain that it expects clients to use + domains = append(domains, domain) + x509Cert, err := GenerateSelfSignedServerCert(trustKey, domains, ips) + if err != nil { + return nil, fmt.Errorf("certificate generation error: %s", err) + } + tlsConfig.Certificates = []tls.Certificate{{ + Certificate: [][]byte{x509Cert.Raw}, + PrivateKey: trustKey.CryptoPrivateKey(), + Leaf: x509Cert, + }} + + return tlsConfig, nil +} + +// NewCertAuthTLSConfig creates a tls.Config for the server to use for +// certificate authentication +func NewCertAuthTLSConfig(caPath, certPath, keyPath string) (*tls.Config, error) { + tlsConfig := newTLSConfig() + + cert, err := tls.LoadX509KeyPair(certPath, keyPath) + if err != nil { + return nil, fmt.Errorf("Couldn't load X509 key pair (%s, %s): %s. Key encrypted?", certPath, keyPath, err) + } + tlsConfig.Certificates = []tls.Certificate{cert} + + // Verify client certificates against a CA? 
+ if caPath != "" { + certPool := x509.NewCertPool() + file, err := ioutil.ReadFile(caPath) + if err != nil { + return nil, fmt.Errorf("Couldn't read CA certificate: %s", err) + } + certPool.AppendCertsFromPEM(file) + + tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert + tlsConfig.ClientCAs = certPool + } + + return tlsConfig, nil +} + +func newTLSConfig() *tls.Config { + return &tls.Config{ + NextProtos: []string{"http/1.1"}, + // Avoid fallback on insecure SSL protocols + MinVersion: tls.VersionTLS10, + } +} + +// parseAddr parses an address into an array of IPs and domains +func parseAddr(addr string) ([]net.IP, []string, error) { + host, _, err := net.SplitHostPort(addr) + if err != nil { + return nil, nil, err + } + var domains []string + var ips []net.IP + ip := net.ParseIP(host) + if ip != nil { + ips = []net.IP{ip} + } else { + domains = []string{host} + } + return ips, domains, nil +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/libtrust/rsa_key.go b/src/cmd/linuxkit/vendor/github.com/docker/libtrust/rsa_key.go new file mode 100644 index 000000000..dac4cacf2 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/libtrust/rsa_key.go @@ -0,0 +1,427 @@ +package libtrust + +import ( + "crypto" + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "encoding/json" + "encoding/pem" + "errors" + "fmt" + "io" + "math/big" +) + +/* + * RSA DSA PUBLIC KEY + */ + +// rsaPublicKey implements a JWK Public Key using RSA digital signature algorithms. +type rsaPublicKey struct { + *rsa.PublicKey + extended map[string]interface{} +} + +func fromRSAPublicKey(cryptoPublicKey *rsa.PublicKey) *rsaPublicKey { + return &rsaPublicKey{cryptoPublicKey, map[string]interface{}{}} +} + +// KeyType returns the JWK key type for RSA keys, i.e., "RSA". +func (k *rsaPublicKey) KeyType() string { + return "RSA" +} + +// KeyID returns a distinct identifier which is unique to this Public Key. +func (k *rsaPublicKey) KeyID() string { + return keyIDFromCryptoKey(k) +} + +func (k *rsaPublicKey) String() string { + return fmt.Sprintf("RSA Public Key <%s>", k.KeyID()) +} + +// Verify verifyies the signature of the data in the io.Reader using this Public Key. +// The alg parameter should be the name of the JWA digital signature algorithm +// which was used to produce the signature and should be supported by this +// public key. Returns a nil error if the signature is valid. +func (k *rsaPublicKey) Verify(data io.Reader, alg string, signature []byte) error { + // Verify the signature of the given date, return non-nil error if valid. + sigAlg, err := rsaSignatureAlgorithmByName(alg) + if err != nil { + return fmt.Errorf("unable to verify Signature: %s", err) + } + + hasher := sigAlg.HashID().New() + _, err = io.Copy(hasher, data) + if err != nil { + return fmt.Errorf("error reading data to sign: %s", err) + } + hash := hasher.Sum(nil) + + err = rsa.VerifyPKCS1v15(k.PublicKey, sigAlg.HashID(), hash, signature) + if err != nil { + return fmt.Errorf("invalid %s signature: %s", sigAlg.HeaderParam(), err) + } + + return nil +} + +// CryptoPublicKey returns the internal object which can be used as a +// crypto.PublicKey for use with other standard library operations. 
The type +// is either *rsa.PublicKey or *ecdsa.PublicKey +func (k *rsaPublicKey) CryptoPublicKey() crypto.PublicKey { + return k.PublicKey +} + +func (k *rsaPublicKey) toMap() map[string]interface{} { + jwk := make(map[string]interface{}) + for k, v := range k.extended { + jwk[k] = v + } + jwk["kty"] = k.KeyType() + jwk["kid"] = k.KeyID() + jwk["n"] = joseBase64UrlEncode(k.N.Bytes()) + jwk["e"] = joseBase64UrlEncode(serializeRSAPublicExponentParam(k.E)) + + return jwk +} + +// MarshalJSON serializes this Public Key using the JWK JSON serialization format for +// RSA keys. +func (k *rsaPublicKey) MarshalJSON() (data []byte, err error) { + return json.Marshal(k.toMap()) +} + +// PEMBlock serializes this Public Key to DER-encoded PKIX format. +func (k *rsaPublicKey) PEMBlock() (*pem.Block, error) { + derBytes, err := x509.MarshalPKIXPublicKey(k.PublicKey) + if err != nil { + return nil, fmt.Errorf("unable to serialize RSA PublicKey to DER-encoded PKIX format: %s", err) + } + k.extended["kid"] = k.KeyID() // For display purposes. + return createPemBlock("PUBLIC KEY", derBytes, k.extended) +} + +func (k *rsaPublicKey) AddExtendedField(field string, value interface{}) { + k.extended[field] = value +} + +func (k *rsaPublicKey) GetExtendedField(field string) interface{} { + v, ok := k.extended[field] + if !ok { + return nil + } + return v +} + +func rsaPublicKeyFromMap(jwk map[string]interface{}) (*rsaPublicKey, error) { + // JWK key type (kty) has already been determined to be "RSA". + // Need to extract 'n', 'e', and 'kid' and check for + // consistency. + + // Get the modulus parameter N. + nB64Url, err := stringFromMap(jwk, "n") + if err != nil { + return nil, fmt.Errorf("JWK RSA Public Key modulus: %s", err) + } + + n, err := parseRSAModulusParam(nB64Url) + if err != nil { + return nil, fmt.Errorf("JWK RSA Public Key modulus: %s", err) + } + + // Get the public exponent E. + eB64Url, err := stringFromMap(jwk, "e") + if err != nil { + return nil, fmt.Errorf("JWK RSA Public Key exponent: %s", err) + } + + e, err := parseRSAPublicExponentParam(eB64Url) + if err != nil { + return nil, fmt.Errorf("JWK RSA Public Key exponent: %s", err) + } + + key := &rsaPublicKey{ + PublicKey: &rsa.PublicKey{N: n, E: e}, + } + + // Key ID is optional, but if it exists, it should match the key. + _, ok := jwk["kid"] + if ok { + kid, err := stringFromMap(jwk, "kid") + if err != nil { + return nil, fmt.Errorf("JWK RSA Public Key ID: %s", err) + } + if kid != key.KeyID() { + return nil, fmt.Errorf("JWK RSA Public Key ID does not match: %s", kid) + } + } + + if _, ok := jwk["d"]; ok { + return nil, fmt.Errorf("JWK RSA Public Key cannot contain private exponent") + } + + key.extended = jwk + + return key, nil +} + +/* + * RSA DSA PRIVATE KEY + */ + +// rsaPrivateKey implements a JWK Private Key using RSA digital signature algorithms. +type rsaPrivateKey struct { + rsaPublicKey + *rsa.PrivateKey +} + +func fromRSAPrivateKey(cryptoPrivateKey *rsa.PrivateKey) *rsaPrivateKey { + return &rsaPrivateKey{ + *fromRSAPublicKey(&cryptoPrivateKey.PublicKey), + cryptoPrivateKey, + } +} + +// PublicKey returns the Public Key data associated with this Private Key. +func (k *rsaPrivateKey) PublicKey() PublicKey { + return &k.rsaPublicKey +} + +func (k *rsaPrivateKey) String() string { + return fmt.Sprintf("RSA Private Key <%s>", k.KeyID()) +} + +// Sign signs the data read from the io.Reader using a signature algorithm supported +// by the RSA private key. 
If the specified hashing algorithm is supported by +// this key, that hash function is used to generate the signature otherwise the +// the default hashing algorithm for this key is used. Returns the signature +// and the name of the JWK signature algorithm used, e.g., "RS256", "RS384", +// "RS512". +func (k *rsaPrivateKey) Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error) { + // Generate a signature of the data using the internal alg. + sigAlg := rsaPKCS1v15SignatureAlgorithmForHashID(hashID) + hasher := sigAlg.HashID().New() + + _, err = io.Copy(hasher, data) + if err != nil { + return nil, "", fmt.Errorf("error reading data to sign: %s", err) + } + hash := hasher.Sum(nil) + + signature, err = rsa.SignPKCS1v15(rand.Reader, k.PrivateKey, sigAlg.HashID(), hash) + if err != nil { + return nil, "", fmt.Errorf("error producing signature: %s", err) + } + + alg = sigAlg.HeaderParam() + + return +} + +// CryptoPrivateKey returns the internal object which can be used as a +// crypto.PublicKey for use with other standard library operations. The type +// is either *rsa.PublicKey or *ecdsa.PublicKey +func (k *rsaPrivateKey) CryptoPrivateKey() crypto.PrivateKey { + return k.PrivateKey +} + +func (k *rsaPrivateKey) toMap() map[string]interface{} { + k.Precompute() // Make sure the precomputed values are stored. + jwk := k.rsaPublicKey.toMap() + + jwk["d"] = joseBase64UrlEncode(k.D.Bytes()) + jwk["p"] = joseBase64UrlEncode(k.Primes[0].Bytes()) + jwk["q"] = joseBase64UrlEncode(k.Primes[1].Bytes()) + jwk["dp"] = joseBase64UrlEncode(k.Precomputed.Dp.Bytes()) + jwk["dq"] = joseBase64UrlEncode(k.Precomputed.Dq.Bytes()) + jwk["qi"] = joseBase64UrlEncode(k.Precomputed.Qinv.Bytes()) + + otherPrimes := k.Primes[2:] + + if len(otherPrimes) > 0 { + otherPrimesInfo := make([]interface{}, len(otherPrimes)) + for i, r := range otherPrimes { + otherPrimeInfo := make(map[string]string, 3) + otherPrimeInfo["r"] = joseBase64UrlEncode(r.Bytes()) + crtVal := k.Precomputed.CRTValues[i] + otherPrimeInfo["d"] = joseBase64UrlEncode(crtVal.Exp.Bytes()) + otherPrimeInfo["t"] = joseBase64UrlEncode(crtVal.Coeff.Bytes()) + otherPrimesInfo[i] = otherPrimeInfo + } + jwk["oth"] = otherPrimesInfo + } + + return jwk +} + +// MarshalJSON serializes this Private Key using the JWK JSON serialization format for +// RSA keys. +func (k *rsaPrivateKey) MarshalJSON() (data []byte, err error) { + return json.Marshal(k.toMap()) +} + +// PEMBlock serializes this Private Key to DER-encoded PKIX format. +func (k *rsaPrivateKey) PEMBlock() (*pem.Block, error) { + derBytes := x509.MarshalPKCS1PrivateKey(k.PrivateKey) + k.extended["keyID"] = k.KeyID() // For display purposes. + return createPemBlock("RSA PRIVATE KEY", derBytes, k.extended) +} + +func rsaPrivateKeyFromMap(jwk map[string]interface{}) (*rsaPrivateKey, error) { + // The JWA spec for RSA Private Keys (draft rfc section 5.3.2) states that + // only the private key exponent 'd' is REQUIRED, the others are just for + // signature/decryption optimizations and SHOULD be included when the JWK + // is produced. We MAY choose to accept a JWK which only includes 'd', but + // we're going to go ahead and not choose to accept it without the extra + // fields. Only the 'oth' field will be optional (for multi-prime keys). 
+	privateExponent, err := parseRSAPrivateKeyParamFromMap(jwk, "d")
+	if err != nil {
+		return nil, fmt.Errorf("JWK RSA Private Key exponent: %s", err)
+	}
+	firstPrimeFactor, err := parseRSAPrivateKeyParamFromMap(jwk, "p")
+	if err != nil {
+		return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err)
+	}
+	secondPrimeFactor, err := parseRSAPrivateKeyParamFromMap(jwk, "q")
+	if err != nil {
+		return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err)
+	}
+	firstFactorCRT, err := parseRSAPrivateKeyParamFromMap(jwk, "dp")
+	if err != nil {
+		return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err)
+	}
+	secondFactorCRT, err := parseRSAPrivateKeyParamFromMap(jwk, "dq")
+	if err != nil {
+		return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err)
+	}
+	crtCoeff, err := parseRSAPrivateKeyParamFromMap(jwk, "qi")
+	if err != nil {
+		return nil, fmt.Errorf("JWK RSA Private Key CRT coefficient: %s", err)
+	}
+
+	var oth interface{}
+	if _, ok := jwk["oth"]; ok {
+		oth = jwk["oth"]
+		delete(jwk, "oth")
+	}
+
+	// JWK key type (kty) has already been determined to be "RSA".
+	// Need to extract the public key information, then extract the private
+	// key values.
+	publicKey, err := rsaPublicKeyFromMap(jwk)
+	if err != nil {
+		return nil, err
+	}
+
+	privateKey := &rsa.PrivateKey{
+		PublicKey: *publicKey.PublicKey,
+		D:         privateExponent,
+		Primes:    []*big.Int{firstPrimeFactor, secondPrimeFactor},
+		Precomputed: rsa.PrecomputedValues{
+			Dp:   firstFactorCRT,
+			Dq:   secondFactorCRT,
+			Qinv: crtCoeff,
+		},
+	}
+
+	if oth != nil {
+		// Should be an array of more JSON objects.
+		otherPrimesInfo, ok := oth.([]interface{})
+		if !ok {
+			return nil, errors.New("JWK RSA Private Key: Invalid other primes info: must be an array")
+		}
+		numOtherPrimeFactors := len(otherPrimesInfo)
+		if numOtherPrimeFactors == 0 {
+			return nil, errors.New("JWK RSA Private Key: Invalid other primes info: must be absent or non-empty")
+		}
+		otherPrimeFactors := make([]*big.Int, numOtherPrimeFactors)
+		productOfPrimes := new(big.Int).Mul(firstPrimeFactor, secondPrimeFactor)
+		crtValues := make([]rsa.CRTValue, numOtherPrimeFactors)
+
+		for i, val := range otherPrimesInfo {
+			otherPrimeinfo, ok := val.(map[string]interface{})
+			if !ok {
+				return nil, errors.New("JWK RSA Private Key: Invalid other prime info: must be a JSON object")
+			}
+
+			otherPrimeFactor, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "r")
+			if err != nil {
+				return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err)
+			}
+			otherFactorCRT, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "d")
+			if err != nil {
+				return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err)
+			}
+			otherCrtCoeff, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "t")
+			if err != nil {
+				return nil, fmt.Errorf("JWK RSA Private Key CRT coefficient: %s", err)
+			}
+
+			// Take a pointer so the assignments below update crtValues[i] in
+			// place; assigning through a copy would leave the slice zeroed.
+			crtValue := &crtValues[i]
+			crtValue.Exp = otherFactorCRT
+			crtValue.Coeff = otherCrtCoeff
+			crtValue.R = productOfPrimes
+			otherPrimeFactors[i] = otherPrimeFactor
+			productOfPrimes = new(big.Int).Mul(productOfPrimes, otherPrimeFactor)
+		}
+
+		privateKey.Primes = append(privateKey.Primes, otherPrimeFactors...)
+		privateKey.Precomputed.CRTValues = crtValues
+	}
+
+	key := &rsaPrivateKey{
+		rsaPublicKey: *publicKey,
+		PrivateKey:   privateKey,
+	}
+
+	return key, nil
+}
+
+/*
+ * Key Generation Functions.
+ */ + +func generateRSAPrivateKey(bits int) (k *rsaPrivateKey, err error) { + k = new(rsaPrivateKey) + k.PrivateKey, err = rsa.GenerateKey(rand.Reader, bits) + if err != nil { + return nil, err + } + + k.rsaPublicKey.PublicKey = &k.PrivateKey.PublicKey + k.extended = make(map[string]interface{}) + + return +} + +// GenerateRSA2048PrivateKey generates a key pair using 2048-bit RSA. +func GenerateRSA2048PrivateKey() (PrivateKey, error) { + k, err := generateRSAPrivateKey(2048) + if err != nil { + return nil, fmt.Errorf("error generating RSA 2048-bit key: %s", err) + } + + return k, nil +} + +// GenerateRSA3072PrivateKey generates a key pair using 3072-bit RSA. +func GenerateRSA3072PrivateKey() (PrivateKey, error) { + k, err := generateRSAPrivateKey(3072) + if err != nil { + return nil, fmt.Errorf("error generating RSA 3072-bit key: %s", err) + } + + return k, nil +} + +// GenerateRSA4096PrivateKey generates a key pair using 4096-bit RSA. +func GenerateRSA4096PrivateKey() (PrivateKey, error) { + k, err := generateRSAPrivateKey(4096) + if err != nil { + return nil, fmt.Errorf("error generating RSA 4096-bit key: %s", err) + } + + return k, nil +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/libtrust/util.go b/src/cmd/linuxkit/vendor/github.com/docker/libtrust/util.go new file mode 100644 index 000000000..a5a101d3f --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/libtrust/util.go @@ -0,0 +1,363 @@ +package libtrust + +import ( + "bytes" + "crypto" + "crypto/elliptic" + "crypto/tls" + "crypto/x509" + "encoding/base32" + "encoding/base64" + "encoding/binary" + "encoding/pem" + "errors" + "fmt" + "math/big" + "net/url" + "os" + "path/filepath" + "strings" + "time" +) + +// LoadOrCreateTrustKey will load a PrivateKey from the specified path +func LoadOrCreateTrustKey(trustKeyPath string) (PrivateKey, error) { + if err := os.MkdirAll(filepath.Dir(trustKeyPath), 0700); err != nil { + return nil, err + } + + trustKey, err := LoadKeyFile(trustKeyPath) + if err == ErrKeyFileDoesNotExist { + trustKey, err = GenerateECP256PrivateKey() + if err != nil { + return nil, fmt.Errorf("error generating key: %s", err) + } + + if err := SaveKey(trustKeyPath, trustKey); err != nil { + return nil, fmt.Errorf("error saving key file: %s", err) + } + + dir, file := filepath.Split(trustKeyPath) + if err := SavePublicKey(filepath.Join(dir, "public-"+file), trustKey.PublicKey()); err != nil { + return nil, fmt.Errorf("error saving public key file: %s", err) + } + } else if err != nil { + return nil, fmt.Errorf("error loading key file: %s", err) + } + return trustKey, nil +} + +// NewIdentityAuthTLSClientConfig returns a tls.Config configured to use identity +// based authentication from the specified dockerUrl, the rootConfigPath and +// the server name to which it is connecting. +// If trustUnknownHosts is true it will automatically add the host to the +// known-hosts.json in rootConfigPath. 
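+//
+// A minimal usage sketch (the URL and paths below are hypothetical):
+//
+//	tlsConfig, err := NewIdentityAuthTLSClientConfig(
+//		"tcp://registry.example.com:2376", true, "/home/user/.docker/trust", "registry.example.com")
+//	if err != nil {
+//		// handle error
+//	}
+//	// tlsConfig can then be used with tls.Dial or in an http.Transport.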
+func NewIdentityAuthTLSClientConfig(dockerUrl string, trustUnknownHosts bool, rootConfigPath string, serverName string) (*tls.Config, error) { + tlsConfig := newTLSConfig() + + trustKeyPath := filepath.Join(rootConfigPath, "key.json") + knownHostsPath := filepath.Join(rootConfigPath, "known-hosts.json") + + u, err := url.Parse(dockerUrl) + if err != nil { + return nil, fmt.Errorf("unable to parse machine url") + } + + if u.Scheme == "unix" { + return nil, nil + } + + addr := u.Host + proto := "tcp" + + trustKey, err := LoadOrCreateTrustKey(trustKeyPath) + if err != nil { + return nil, fmt.Errorf("unable to load trust key: %s", err) + } + + knownHosts, err := LoadKeySetFile(knownHostsPath) + if err != nil { + return nil, fmt.Errorf("could not load trusted hosts file: %s", err) + } + + allowedHosts, err := FilterByHosts(knownHosts, addr, false) + if err != nil { + return nil, fmt.Errorf("error filtering hosts: %s", err) + } + + certPool, err := GenerateCACertPool(trustKey, allowedHosts) + if err != nil { + return nil, fmt.Errorf("Could not create CA pool: %s", err) + } + + tlsConfig.ServerName = serverName + tlsConfig.RootCAs = certPool + + x509Cert, err := GenerateSelfSignedClientCert(trustKey) + if err != nil { + return nil, fmt.Errorf("certificate generation error: %s", err) + } + + tlsConfig.Certificates = []tls.Certificate{{ + Certificate: [][]byte{x509Cert.Raw}, + PrivateKey: trustKey.CryptoPrivateKey(), + Leaf: x509Cert, + }} + + tlsConfig.InsecureSkipVerify = true + + testConn, err := tls.Dial(proto, addr, tlsConfig) + if err != nil { + return nil, fmt.Errorf("tls Handshake error: %s", err) + } + + opts := x509.VerifyOptions{ + Roots: tlsConfig.RootCAs, + CurrentTime: time.Now(), + DNSName: tlsConfig.ServerName, + Intermediates: x509.NewCertPool(), + } + + certs := testConn.ConnectionState().PeerCertificates + for i, cert := range certs { + if i == 0 { + continue + } + opts.Intermediates.AddCert(cert) + } + + if _, err := certs[0].Verify(opts); err != nil { + if _, ok := err.(x509.UnknownAuthorityError); ok { + if trustUnknownHosts { + pubKey, err := FromCryptoPublicKey(certs[0].PublicKey) + if err != nil { + return nil, fmt.Errorf("error extracting public key from cert: %s", err) + } + + pubKey.AddExtendedField("hosts", []string{addr}) + + if err := AddKeySetFile(knownHostsPath, pubKey); err != nil { + return nil, fmt.Errorf("error adding machine to known hosts: %s", err) + } + } else { + return nil, fmt.Errorf("unable to connect. unknown host: %s", addr) + } + } + } + + testConn.Close() + tlsConfig.InsecureSkipVerify = false + + return tlsConfig, nil +} + +// joseBase64UrlEncode encodes the given data using the standard base64 url +// encoding format but with all trailing '=' characters omitted in accordance +// with the jose specification. +// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2 +func joseBase64UrlEncode(b []byte) string { + return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=") +} + +// joseBase64UrlDecode decodes the given string using the standard base64 url +// decoder but first adds the appropriate number of trailing '=' characters in +// accordance with the jose specification. 
+// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2 +func joseBase64UrlDecode(s string) ([]byte, error) { + s = strings.Replace(s, "\n", "", -1) + s = strings.Replace(s, " ", "", -1) + switch len(s) % 4 { + case 0: + case 2: + s += "==" + case 3: + s += "=" + default: + return nil, errors.New("illegal base64url string") + } + return base64.URLEncoding.DecodeString(s) +} + +func keyIDEncode(b []byte) string { + s := strings.TrimRight(base32.StdEncoding.EncodeToString(b), "=") + var buf bytes.Buffer + var i int + for i = 0; i < len(s)/4-1; i++ { + start := i * 4 + end := start + 4 + buf.WriteString(s[start:end] + ":") + } + buf.WriteString(s[i*4:]) + return buf.String() +} + +func keyIDFromCryptoKey(pubKey PublicKey) string { + // Generate and return a 'libtrust' fingerprint of the public key. + // For an RSA key this should be: + // SHA256(DER encoded ASN1) + // Then truncated to 240 bits and encoded into 12 base32 groups like so: + // ABCD:EFGH:IJKL:MNOP:QRST:UVWX:YZ23:4567:ABCD:EFGH:IJKL:MNOP + derBytes, err := x509.MarshalPKIXPublicKey(pubKey.CryptoPublicKey()) + if err != nil { + return "" + } + hasher := crypto.SHA256.New() + hasher.Write(derBytes) + return keyIDEncode(hasher.Sum(nil)[:30]) +} + +func stringFromMap(m map[string]interface{}, key string) (string, error) { + val, ok := m[key] + if !ok { + return "", fmt.Errorf("%q value not specified", key) + } + + str, ok := val.(string) + if !ok { + return "", fmt.Errorf("%q value must be a string", key) + } + delete(m, key) + + return str, nil +} + +func parseECCoordinate(cB64Url string, curve elliptic.Curve) (*big.Int, error) { + curveByteLen := (curve.Params().BitSize + 7) >> 3 + + cBytes, err := joseBase64UrlDecode(cB64Url) + if err != nil { + return nil, fmt.Errorf("invalid base64 URL encoding: %s", err) + } + cByteLength := len(cBytes) + if cByteLength != curveByteLen { + return nil, fmt.Errorf("invalid number of octets: got %d, should be %d", cByteLength, curveByteLen) + } + return new(big.Int).SetBytes(cBytes), nil +} + +func parseECPrivateParam(dB64Url string, curve elliptic.Curve) (*big.Int, error) { + dBytes, err := joseBase64UrlDecode(dB64Url) + if err != nil { + return nil, fmt.Errorf("invalid base64 URL encoding: %s", err) + } + + // The length of this octet string MUST be ceiling(log-base-2(n)/8) + // octets (where n is the order of the curve). This is because the private + // key d must be in the interval [1, n-1] so the bitlength of d should be + // no larger than the bitlength of n-1. The easiest way to find the octet + // length is to take bitlength(n-1), add 7 to force a carry, and shift this + // bit sequence right by 3, which is essentially dividing by 8 and adding + // 1 if there is any remainder. Thus, the private key value d should be + // output to (bitlength(n-1)+7)>>3 octets. + n := curve.Params().N + octetLength := (new(big.Int).Sub(n, big.NewInt(1)).BitLen() + 7) >> 3 + dByteLength := len(dBytes) + + if dByteLength != octetLength { + return nil, fmt.Errorf("invalid number of octets: got %d, should be %d", dByteLength, octetLength) + } + + return new(big.Int).SetBytes(dBytes), nil +} + +func parseRSAModulusParam(nB64Url string) (*big.Int, error) { + nBytes, err := joseBase64UrlDecode(nB64Url) + if err != nil { + return nil, fmt.Errorf("invalid base64 URL encoding: %s", err) + } + + return new(big.Int).SetBytes(nBytes), nil +} + +func serializeRSAPublicExponentParam(e int) []byte { + // We MUST use the minimum number of octets to represent E. 
+	// E is supposed to be 65537 for performance and security reasons
+	// and is what golang's rsa package generates, but it might be
+	// different if imported from some other generator.
+	buf := make([]byte, 4)
+	binary.BigEndian.PutUint32(buf, uint32(e))
+	var i int
+	// Scan only the 4 bytes of buf; a larger bound could read past the buffer.
+	for i = 0; i < len(buf); i++ {
+		if buf[i] != 0 {
+			break
+		}
+	}
+	return buf[i:]
+}
+
+func parseRSAPublicExponentParam(eB64Url string) (int, error) {
+	eBytes, err := joseBase64UrlDecode(eB64Url)
+	if err != nil {
+		return 0, fmt.Errorf("invalid base64 URL encoding: %s", err)
+	}
+	// Only the minimum number of bytes were used to represent E, but
+	// binary.BigEndian.Uint32 expects at least 4 bytes, so we need
+	// to add zero padding if necessary.
+	byteLen := len(eBytes)
+	buf := make([]byte, 4-byteLen, 4)
+	eBytes = append(buf, eBytes...)
+
+	return int(binary.BigEndian.Uint32(eBytes)), nil
+}
+
+func parseRSAPrivateKeyParamFromMap(m map[string]interface{}, key string) (*big.Int, error) {
+	b64Url, err := stringFromMap(m, key)
+	if err != nil {
+		return nil, err
+	}
+
+	paramBytes, err := joseBase64UrlDecode(b64Url)
+	if err != nil {
+		return nil, fmt.Errorf("invalid base64 URL encoding: %s", err)
+	}
+
+	return new(big.Int).SetBytes(paramBytes), nil
+}
+
+func createPemBlock(name string, derBytes []byte, headers map[string]interface{}) (*pem.Block, error) {
+	pemBlock := &pem.Block{Type: name, Bytes: derBytes, Headers: map[string]string{}}
+	for k, v := range headers {
+		switch val := v.(type) {
+		case string:
+			pemBlock.Headers[k] = val
+		case []string:
+			if k == "hosts" {
+				pemBlock.Headers[k] = strings.Join(val, ",")
+			} else {
+				// TODO: return an error for this non-encodable type;
+				// for now such headers are silently skipped.
+			}
+		default:
+			// TODO: return an error for this non-encodable type;
+			// for now such headers are silently skipped.
+		}
+	}
+
+	return pemBlock, nil
+}
+
+func pubKeyFromPEMBlock(pemBlock *pem.Block) (PublicKey, error) {
+	cryptoPublicKey, err := x509.ParsePKIXPublicKey(pemBlock.Bytes)
+	if err != nil {
+		return nil, fmt.Errorf("unable to decode Public Key PEM data: %s", err)
+	}
+
+	pubKey, err := FromCryptoPublicKey(cryptoPublicKey)
+	if err != nil {
+		return nil, err
+	}
+
+	addPEMHeadersToKey(pemBlock, pubKey)
+
+	return pubKey, nil
+}
+
+func addPEMHeadersToKey(pemBlock *pem.Block, pubKey PublicKey) {
+	for key, value := range pemBlock.Headers {
+		var safeVal interface{}
+		if key == "hosts" {
+			safeVal = strings.Split(value, ",")
+		} else {
+			safeVal = value
+		}
+		pubKey.AddExtendedField(key, safeVal)
+	}
+}
diff --git a/src/cmd/linuxkit/vendor/github.com/estesp/manifest-tool/LICENSE b/src/cmd/linuxkit/vendor/github.com/estesp/manifest-tool/LICENSE
new file mode 100644
index 000000000..e2e6ae04d
--- /dev/null
+++ b/src/cmd/linuxkit/vendor/github.com/estesp/manifest-tool/LICENSE
@@ -0,0 +1,191 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity.
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2016 Antonio Murdaca + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/src/cmd/linuxkit/vendor/github.com/estesp/manifest-tool/README.md b/src/cmd/linuxkit/vendor/github.com/estesp/manifest-tool/README.md new file mode 100644 index 000000000..b9e2a657d --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/estesp/manifest-tool/README.md @@ -0,0 +1,311 @@ +## manifest-tool + +`manifest-tool` is a command line utility that implements a portion of the client side of the +Docker registry v2.2 API for interacting with manifest objects in a registry conforming to that +specification. + +This tool was mainly created for the purpose of viewing, creating, and pushing the +new **manifests list** object type in the Docker registry. 
+Manifest lists are defined in the [v2.2 image specification](https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-2.md) and exist mainly
+for the purpose of supporting multi-architecture and/or multi-platform images within a Docker registry.
+
+Note that manifest-tool was initially started as a joint project with [Harshal Patil](https://github.com/harche) from IBM Bangalore, and was originally forked from the registry client codebase of skopeo by [Antonio Murdaca (runcom)](https://github.com/runcom); skopeo later became part of [Project Atomic](https://github.com/projectatomic/skopeo). Thanks to both Antonio and Harshal for their initial work that made this possible! Also, note that work is ongoing to add these capabilities directly to the Docker client. Thanks to Christy Perez from IBM Systems for her hard work in pushing ahead with [a docker/cli PR](https://github.com/docker/cli/pull/138) which should make this tool obsolete!
+
+> **UPDATE (Feb 2018):** The Docker client PR #138 is merged, providing a `docker manifest`
+> command that can replace the use of `manifest-tool` in most scenarios. Other
+> follow-on PRs are in process to add functionality to `docker manifest` to make
+> it completely functional for all multi-platform image use cases.
+
+### Sample Usage
+
+The two main capabilities of this tool are to 1) **inspect** manifests (of all media types) within any registry supporting the Docker API and 2) **push** manifest list objects to any registry which supports the Docker V2 API and the v2.2 image specification.
+
+> *Note:* For pushing to an authenticated registry like DockerHub, you will need a config generated via
+`docker login`:
+```sh
+docker login
+```
+> *Important:* Since 17.03, Docker for Mac stores credentials in the OSX/macOS keychain and not in `config.json`. This means that users of
+> `manifest-tool` on Mac will need to specify `--username` and `--password` instead of relying on `docker login` credentials. Note that special characters may
+> need escaping depending on your shell environment when provided via the command line.
+
+If you are pushing to a registry requiring authentication, credentials will be handled as follows:
+ - If the `--username` and `--password` flags are supplied, their contents will be used as the basic credentials for any actions requiring authentication.
+ - If `--username` and `--password` are **not** provided as command line flags, the default docker client config will be loaded (default location: `$HOME/.docker/config.json`) and credentials for the target registry will be queried if available.
+ - If your docker config file is not in the standard location, you can provide an alternate directory location via the `--docker-cfg` flag and the `config.json` file will be used from that alternate directory.
+
+The following example shows the use of `--docker-cfg` to provide an alternate directory location for `docker login`-generated credentials when pushing a manifest list:
+
+```sh
+$ ./manifest-tool --docker-cfg '/tmp/some-docker-config/' push from-spec /home/myuser/sample.yml
+```
+
+#### Inspect
+
+You can inspect the manifest of any *repo/image:tag* combination by simply using the **inspect** command. Similar to the Docker client, if no tag is provided, *latest* is used as the tag. The output of an inspect on a manifest list media type is shown below.
In the case of a manifest list the output shows all the platform-segregated details so the user can easily determine what platforms are supported by the image: + +```sh +$ ./manifest-tool inspect trollin/golang:latest +Name: trollin/golang (Type: application/vnd.docker.distribution.manifest.list.v2+json) +Digest: sha256:e001ca16da8f96ed191624c365de40d964092053a73ffa02667378ecc793dabb + * Contains 5 manifest references: +1 Mfst Type: application/vnd.docker.distribution.manifest.v2+json +1 Digest: sha256:738ff08d42047a2167c8aed758140d01df835295fa6829f49a45f014045c86b0 +1 Mfst Length: 1792 +1 Platform: +1 - OS: linux +1 - OS Vers: +1 - OS Feat: [] +1 - Arch: amd64 +1 - Variant: +1 - Feature: +1 # Layers: 7 + layer 1: digest = sha256:9f0706ba7422412cd468804fee456786f88bed94bf9aea6dde2a47f770d19d27 + layer 2: digest = sha256:d3942a742d221ef22a0a335c4eebf09e15a36dcfb224b5a2d0cdcc405f374ccb + layer 3: digest = sha256:62b1123c88f67a9ad43d9bf3f552bbe3352696a674e82712fda785db4f71a655 + layer 4: digest = sha256:3306e13140efed403fce1789f2760e1356e5b4d76f84b0a92a0ab4b769d3a447 + layer 5: digest = sha256:72bc60ec9d39d021c5af5f1b950d4b5ea15495e36166b662f608bf7272a4ef3c + layer 6: digest = sha256:2b74c5631cb8592d5eaebfc058c8c0a862d51eff640400452cd1be0089c0b53d + layer 7: digest = sha256:c3c00cee508294b26a9046e2c43d845e3c4fa9101c449f3f67d696b8e1f0b05a + +2 Mfst Type: application/vnd.docker.distribution.manifest.v2+json +2 Digest: sha256:dd7f68050650c90228ee27fb4e6a66a6e55172e6c7bb084c3b257507b1920d9b +2 Mfst Length: 1792 +2 Platform: +2 - OS: linux +2 - OS Vers: +2 - OS Feat: [] +2 - Arch: arm +2 - Variant: v7 +2 - Feature: +2 # Layers: 7 + layer 1: digest = sha256:72c70f9f7d679945bc71d954dc0c7de236e0067af495d09e9bea24f497cc79b7 + layer 2: digest = sha256:468951d2a7c0c7ac263f85ec984303dc627e7d72cf255d3b496ef2e8820fed0c + layer 3: digest = sha256:f3ba027ee390db991d1f3721300111af8180e195547e7812b36d992bf1223f8d + layer 4: digest = sha256:527515a549a64f7c7e59149599a36547266916c3f01f2a520af61731bdc5d84f + layer 5: digest = sha256:c2a8846df889425dbb7b7eec4aa23ba80b18cb827802fb2c3047f951af8e47eb + layer 6: digest = sha256:836904189352dbaa66117d2dc671f0248d98af7e5e4fd6c501331e0aadca1fe5 + layer 7: digest = sha256:a420e8eb48d7eaf57cd199b887b205c4a7e772f337abfbb99fb109d800be9cdf + +3 Mfst Type: application/vnd.docker.distribution.manifest.v2+json +3 Digest: sha256:7da99e1901b73425d45dc45779784cabd6c68c791cf99a4630177471e95a62ea +3 Mfst Length: 1792 +3 Platform: +3 - OS: linux +3 - OS Vers: +3 - OS Feat: [] +3 - Arch: 386 +3 - Variant: +3 - Feature: +3 # Layers: 7 + layer 1: digest = sha256:2fa359c89a0e952ec2fe14e3c584ee13d6ec919c73a7dcac34ba320a459e2a62 + layer 2: digest = sha256:54ddb5e3622b4571bbc0d44b29cebb14d29dbca08a475ff77342f35097464fe9 + layer 3: digest = sha256:c7e97c3de48d315e05dd5fad6a12bfeece80272f6b0c7107bb08d3de60f32f6c + layer 4: digest = sha256:c19c57d924460ac3012f2e60e9327b31862f5b4410bce307fe18f258dee273c6 + layer 5: digest = sha256:9f2e09461ebc0017a8f53a2a4a770215d72c9c95f40bd84400b39ff321f2fa0d + layer 6: digest = sha256:af0328d430b279d615d319ccba88420ac3fdff3c9d9ab9ae65ef383613181b12 + layer 7: digest = sha256:48a5a549190d0e2345a3ef52fb36992e4b3b86154648fed6a2455e394339682e + +4 Mfst Type: application/vnd.docker.distribution.manifest.v2+json +4 Digest: sha256:71c489123d96fb379a92bb62a696e140eaa24bc44e241e5917dc01f66b22b8cf +4 Mfst Length: 1792 +4 Platform: +4 - OS: linux +4 - OS Vers: +4 - OS Feat: [] +4 - Arch: ppc64le +4 - Variant: +4 - Feature: +4 # Layers: 7 + layer 1: digest = 
sha256:a5561821dba4ceb47be1d2f5f108a24b391df9d6a3a764d2c04ea8ac29410625
+ layer 2: digest = sha256:88807427a2577c993b597a100e9caba7972e266a0a18cba8c2fe2d14f1367764
+ layer 3: digest = sha256:82d997bbc6b0b5fec4c8b5fa96e4e89d98bb1ac41bfbecc0682a813fa137e4fb
+ layer 4: digest = sha256:b6f51579554854f79e3b930af1eaace3c8a3e9da7df41a8b3bdc97e47697a0ef
+ layer 5: digest = sha256:11446b02d18e4448e5359af390493245df61146672600d0be7cfd6e37310ba57
+ layer 6: digest = sha256:dd61e5bdd4e1e530175062b0e32515b5c346f43ccf25c081987ae9b6b49c3a15
+ layer 7: digest = sha256:7c613dd01f119499d8e7b4b1e4fdb6638f04cd20528e4b3a8e537d61c66cdc18
+
+5 Mfst Type: application/vnd.docker.distribution.manifest.v2+json
+5 Digest: sha256:0dc83ed60579807a0e6913e25f403755b733bf2b8415ae633c58b0eff7f53830
+5 Mfst Length: 1792
+5 Platform:
+5 - OS: linux
+5 - OS Vers:
+5 - OS Feat: []
+5 - Arch: s390x
+5 - Variant:
+5 - Feature:
+5 # Layers: 7
+ layer 1: digest = sha256:29420dd727d39cbedfb85562111f49e24b0b96adda04de4663d2099fbbf4f993
+ layer 2: digest = sha256:8df37c45a9ab1f6a86d72a13aaa358015be4fd124c6a11083f75e5371273d5dc
+ layer 3: digest = sha256:1533d3ea9025b772c86273232b4ee6a0dd2cb6852dbf3107db1e1aab22b744fd
+ layer 4: digest = sha256:7e616db8da0d96fc673928cf73007a7456efcba84a6df127e476a102dfea6f7e
+ layer 5: digest = sha256:70c28aac7effb022114051bd9d3df946242c838544b5d2b9c0cc128ff26cffdc
+ layer 6: digest = sha256:e6b51fe20471b4195349bec4a2ea07d8b4f2e69cac056f1f41c047d264b23f58
+ layer 7: digest = sha256:b3c7c7b9de1680f7f0400d9757a5d8cb5963dbd28d9111ac72da620501bf2f34
+```
+
+From this output we can clearly see this is a manifest list object (the media type is output as well) that has five platform definitions to support amd64, ARMv7, i386, ppc64le, and s390x (z Systems). To read more about manifest lists and how the Docker engine uses this information to determine what image/layers to pull, read this [blog post on multi-platform support in Docker](https://integratedcode.us/2016/04/22/a-step-towards-multi-platform-docker-images/).
+
+#### Create/Push
+
+Given that the Docker client does not have a way to perform the creation/pushing of manifest list objects (although see the update above regarding the merged `docker manifest` PR), the main role of `manifest-tool` is to create manifest list entries and push them to a Docker registry v2.2 API-supporting repository. The classic method to define the manifest list particulars is via a YAML file.
+
+A sample YAML file is shown below. The cross-repository push feature is exploited in `manifest-tool`
+so that the source and target image names can differ as long as they are within the same registry.
+For example, a source image could be named `myprivreg:5000/someimage_ppc64le:latest` and
+referenced by a manifest list in repository `myprivreg:5000/someimage:latest`.
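+
+> *Note:* Under the Docker Registry v2 API, this cross-repository support works via the
+> blob mount endpoint (`POST /v2/<target>/blobs/uploads/?mount=<digest>&from=<source repository>`):
+> `manifest-tool` asks the registry to mount any referenced layers into the target
+> repository, and pushes the referenced manifests by digest, before pushing the
+> manifest list itself.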
+ +With a private registry running on port 5000, a sample YAML input to create a manifest list +combining a ppc64le and amd64 image would look like this: +``` +image: myprivreg:5000/someimage:latest +manifests: + - + image: myprivreg:5000/someimage:ppc64le + platform: + architecture: ppc64le + os: linux + - + image: myprivreg:5000/someimage:amd64 + platform: + architecture: amd64 + features: + - sse + os: linux +``` + +With the above YAML definition, creating the manifest list with the tool would use the following command: + +```sh +$ ./manifest-tool push from-spec someimage.yaml +``` + +In addition to the YAML file format, `manifest-tool` has the option to use command line arguments to provide the specified images/tags and platform OS/architecture details. Instead of `from-spec` you can use `from-args` with the following format: + +``` +$ ./manifest-tool push from-args \ + --platforms linux/amd64,linux/arm,linux/arm64 \ + --template foo/bar-ARCH:v1 \ + --target foo/bar:v1 +``` + +On the command line you specify the platform os/arch pairs, a template for finding the source images for each input platform pair, and a target image name. + +Specifically: + - `--platforms` specifies which platforms you want to push for in the form OS/ARCH,OS/ARCH,... + - `--template` specifies the image repo:tag source for inputs by replacing the placeholders `OS` and `ARCH` with the inputs from `--platforms`. + - `--target` specifies the target image repo:tag that will be the manifest list entry in the registry. + +##### Functional Changelog for Push/Create + + - Release **v0.5.0**: + 1. You can now specify `--ignore-missing` and if any of the input images are not available, the tool will output a warning but will not terminate. This allows for "best case" creation of manifest lists based on available images at the time. + 2. Using the YAML input option, you can leave the platform specification empty and `manifest-tool` will auto-populate the platform definition by using the source image manifest OS/arch details. Note that this is potentially deficient for cases where the image was built in a cross-compiled fashion and the source image data is incorrect as it does not match the binary OS/arch content in the image layers. + + - Release **v0.6.0**: + 1. You can specify `tags:` as a list of additional tags to push to the registry against the target manifest list name being created ([#32](https://github.com/estesp/manifest-tool/pull/32)): + +```yaml +image: myprivreg:5000/someimage:1.0.0 +tags: ['1.0', '1', 'latest'] +manifests: + ... +``` + + - Release **v0.7.0**: + 1. The output of `manifest-tool` was modified to add the size of the manifest list canonical JSON pushed to the registry. This allows manifest list content to be signed using 3rd party tools like `notary` which needs the size of the object to validate and sign the content. This is used by the [LinuxKit project](https://github.com/linuxkit/linuxkit) to create signed manifest lists of all of their container images. Example output at the end of a successful manifest list create is shown below. Note that the size field is appended to the digest hash in this version: +``` +Digest: sha256:f316f43aceb7a920a7b6c0278c76694a84f608b72bd955db7c9e24927e7edcb3 2058 +``` + +### Building + +The releases of `manifest-tool` are built using the latest Go version; currently 1.12.x. 
+ +To build `manifest-tool`, clone this repository into your `$GOPATH`: + +```sh +$ cd $GOPATH/src +$ mkdir -p github.com/estesp +$ cd github.com/estesp +$ git clone https://github.com/estesp/manifest-tool +$ cd manifest-tool && make binary +``` + +If you do not have a local Golang environment, you can use the `make build` target to build `manifest-tool` in a Golang 1.9.1-based container environment. This will require that you have Docker installed. The `make static` target will build a statically-linked binary, and `make cross` is used to build all supported CPU architectures, creating static binaries for each platform. + +Note that signed binary releases are available on the project's [GitHub releases page](https://github.com/estesp/manifest-tool/releases) for several CPU architectures for Linux as well as OSX/macOS. + +### Using manifest-tool Without Installation + +Interested in using `manifest-tool` for simple query operations? For example, +maybe you only want to query if a specific image:tag combination is a manifest +list entry or not, and if so, what platforms are listed in the manifest. + +You can consume this feature of `manifest-tool` without installing the binary +as long as you are querying public (e.g. not private/authentication-requiring +registries) images via another project, [mquery](https://github.com/estesp/mquery). + +You can use `mquery` via a multi-platform image currently located on DockerHub +as **mplatform/mquery:latest**. For example, you can query the `mquery` image +itself with the following command + +```sh +$ docker run --rm mplatform/mquery mplatform/mquery +Image: mplatform/mquery + * Manifest List: Yes + * Supported platforms: + - linux/amd64 + - linux/arm/undefined + - linux/arm64/undefined + - linux/ppc64le + - linux/s390x + - windows/amd64:10.0.14393.1593 +``` + +Note that the `undefined` reference in the output is due to the fact that +the variant field isn't being filled out in the manifest list platform +object for this image. + +The `mquery` program itself is a small Go program that queries functions +running via [OpenWhisk](http://openwhisk.incubator.apache.org/) in [IBM Cloud Functions](https://console.bluemix.net/docs/openwhisk/index.html#getting-started-with-cloud-functions) public serverless offering. One +of those functions is packaged as a Docker container image with +`manifest-tool` installed. More information is available in the +[mquery GitHub repo](https://github.com/estesp/mquery). You can read more +of the background details in [my blog post about the Moby Summit EU talk](https://integratedcode.us/2017/11/21/moby-summit-serverless-openwhisk-multi-arch/) +on this topic. + +### Known Supporting Registries + +Not every registry that claims Docker v2 image API and format support allows manifest lists to be pushed. The errors are not always clear; it could be blocking the blob mount API calls, or the push of the manifest list media type object. At this point, the good news is that the growth of interest in manifest list images has caused quite a few popular registries to add or fix Docker v2 API and image support for manifest lists. The following is a known list of publicly available Docker v2 conformant registries which have been tested with `manifest-tool` or `docker manifest`: + + 1. [DockerHub](https://hub.docker.com): Has supported manifest lists since 2016. + 2. [Google Container Registry/gcr.io](https://cloud.google.com/container-registry/): gcr.io manifest list support was fixed in 4Q2017. + 3. 
[IBM Cloud Container Registry](https://www.ibm.com/cloud/container-registry): The IBM public cloud container registry supports manifest lists since the latter half of 2017. + 4. [Microsoft Azure Container Registry/azurecr.io](https://azure.microsoft.com/en-us/services/container-registry/): The Azure CR supports manifest lists. + +### Test a Registry for "Manifest List" Support + +If you operate or use a registry claiming conformance to the Docker distribution v2 API and v2.2 image +specification you may want to confirm that this image registry supports the manifest list *media type* +and the APIs used to create a manifest list. + +This GitHub repo now has a pre-configured test script which will use readily available multi-architecture +content from DockerHub and tag, push, and then combine it into a manifest list against any image registry +you point it to. See the [test-registry.sh script](https://github.com/estesp/manifest-tool/blob/master/integration/test-registry.sh) in this repo's **integration** directory +for further details. A simple use of the script is +shown below to test a private registry: +``` +$ ./test-registry.sh r.myprivreg.com/somerepo +``` + +> **Note:** This script will expect login details +> have already been provided to `docker login` and +> will use those stored credentials for push and +> API access to *somerepo* on *r.myprivreg.com*. + +### License + +`manifest-tool` is licensed under the Apache Software License (ASL) 2.0 diff --git a/src/cmd/linuxkit/vendor/github.com/estesp/manifest-tool/docker/createml.go b/src/cmd/linuxkit/vendor/github.com/estesp/manifest-tool/docker/createml.go new file mode 100644 index 000000000..90c533ebd --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/estesp/manifest-tool/docker/createml.go @@ -0,0 +1,527 @@ +package docker + +import ( + "bytes" + "fmt" + "net" + "net/http" + "net/url" + "strings" + "time" + + "github.com/docker/distribution/manifest/manifestlist" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/api/v2" + "github.com/docker/distribution/registry/client/auth" + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/registry" + "github.com/opencontainers/go-digest" + "github.com/sirupsen/logrus" + + "github.com/estesp/manifest-tool/types" +) + +// we will store up a list of blobs we must ask the registry +// to cross-mount into our target namespace +type blobMount struct { + FromRepo string + Digest string +} + +// if we have mounted blobs referenced from manifests from +// outside the target repository namespace we will need to +// push them to our target's repo as they will be references +// from the final manifest list object we push +type manifestPush struct { + Name string + Digest string + JSONBytes []byte + MediaType string +} + +// PutManifestList takes an authentication variable and a yaml spec struct and pushes an image list based on the spec +func PutManifestList(a *types.AuthInfo, yamlInput types.YAMLInput, ignoreMissing, insecure bool) (string, int, error) { + var ( + manifestList manifestlist.ManifestList + blobMountRequests []blobMount + manifestRequests []manifestPush + ) + + // process the final image name reference for the manifest list + targetRef, err := reference.ParseNormalizedNamed(yamlInput.Image) + if err != nil { + return "", 0, fmt.Errorf("Error parsing name for manifest list (%s): %v", yamlInput.Image, err) + } + targetRepo, err := registry.ParseRepositoryInfo(targetRef) + if 
err != nil { + return "", 0, fmt.Errorf("Error parsing repository name for manifest list (%s): %v", yamlInput.Image, err) + } + targetEndpoint, repoName, err := setupRepo(targetRepo, insecure) + if err != nil { + return "", 0, fmt.Errorf("Error setting up repository endpoint and references for %q: %v", targetRef, err) + } + + // Now create the manifest list payload by looking up the manifest schemas + // for the constituent images: + logrus.Info("Retrieving digests of images...") + for _, img := range yamlInput.Manifests { + mfstData, repoInfo, err := GetImageData(a, img.Image, insecure, false) + if err != nil { + // if ignoreMissing is true, we will skip this error and simply + // log a warning that we couldn't find it in the registry + if ignoreMissing { + logrus.Warnf("Couldn't find or access image reference %q. Skipping image.", img.Image) + continue + } + return "", 0, fmt.Errorf("Inspect of image %q failed with error: %v", img.Image, err) + } + if reference.Domain(repoInfo.Name) != reference.Domain(targetRepo.Name) { + return "", 0, fmt.Errorf("Cannot use source images from a different registry than the target image: %s != %s", reference.Domain(repoInfo.Name), reference.Domain(targetRepo.Name)) + } + if len(mfstData) > 1 { + // too many responses--can only happen if a manifest list was returned for the name lookup + return "", 0, fmt.Errorf("You specified a manifest list entry from a digest that points to a current manifest list. Manifest lists do not allow recursion") + } + // the non-manifest list case will always have exactly one manifest response + imgMfst := mfstData[0] + + // fill os/arch from inspected image if not specified in input YAML + if img.Platform.OS == "" && img.Platform.Architecture == "" { + // prefer a full platform object, if one is already available (and appears to have meaningful content) + if imgMfst.Platform.OS != "" || imgMfst.Platform.Architecture != "" { + img.Platform = imgMfst.Platform + } else if imgMfst.Os != "" || imgMfst.Architecture != "" { + img.Platform.OS = imgMfst.Os + img.Platform.Architecture = imgMfst.Architecture + } + } + + // if the origin image has OSFeature and/or OSVersion information, and + // these values were not specified in the creation YAML, then + // retain the origin values in the Platform definition for the manifest list: + if imgMfst.OSVersion != "" && img.Platform.OSVersion == "" { + img.Platform.OSVersion = imgMfst.OSVersion + } + if len(imgMfst.OSFeatures) > 0 && len(img.Platform.OSFeatures) == 0 { + img.Platform.OSFeatures = imgMfst.OSFeatures + } + + // validate os/arch input + if !isValidOSArch(img.Platform.OS, img.Platform.Architecture, img.Platform.Variant) { + return "", 0, fmt.Errorf("Manifest entry for image %s has unsupported os/arch or os/arch/variant combination: %s/%s/%s", img.Image, img.Platform.OS, img.Platform.Architecture, img.Platform.Variant) + } + + manifest := manifestlist.ManifestDescriptor{ + Platform: img.Platform, + } + manifest.Descriptor.Digest, err = digest.Parse(imgMfst.Digest) + manifest.Size = imgMfst.Size + manifest.MediaType = imgMfst.MediaType + + if err != nil { + return "", 0, fmt.Errorf("Digest parse of image %q failed with error: %v", img.Image, err) + } + logrus.Infof("Image %q is digest %s; size: %d", img.Image, imgMfst.Digest, imgMfst.Size) + + // if this image is in a different repo, we need to add the layer & config digests to the list of + // requested blob mounts (cross-repository push) before pushing the manifest list + if repoName != reference.Path(repoInfo.Name) { + 
logrus.Debugf("Adding manifest references of %q to blob mount requests", img.Image) + for _, layer := range imgMfst.References { + blobMountRequests = append(blobMountRequests, blobMount{FromRepo: reference.Path(repoInfo.Name), Digest: layer}) + } + // also must add the manifest to be pushed in the target namespace + logrus.Debugf("Adding manifest %q -> to be pushed to %q as a manifest reference", reference.Path(repoInfo.Name), repoName) + manifestRequests = append(manifestRequests, manifestPush{ + Name: reference.Path(repoInfo.Name), + Digest: imgMfst.Digest, + JSONBytes: imgMfst.CanonicalJSON, + MediaType: imgMfst.MediaType, + }) + } + manifestList.Manifests = append(manifestList.Manifests, manifest) + } + + if ignoreMissing && len(manifestList.Manifests) == 0 { + // we need to verify we at least have one valid entry in the list + // otherwise our manifest list will be totally empty + return "", 0, fmt.Errorf("all entries were skipped due to missing source image references; no manifest list to push") + } + // Set the schema version + manifestList.Versioned = manifestlist.SchemaVersion + + urlBuilder, err := v2.NewURLBuilderFromString(targetEndpoint.URL.String(), false) + if err != nil { + return "", 0, fmt.Errorf("Can't create URL builder from endpoint (%s): %v", targetEndpoint.URL.String(), err) + } + pushURL, err := createManifestURLFromRef(targetRef, urlBuilder) + if err != nil { + return "", 0, fmt.Errorf("Error setting up repository endpoint and references for %q: %v", targetRef, err) + } + logrus.Debugf("Manifest list push url: %s", pushURL) + + deserializedManifestList, err := manifestlist.FromDescriptors(manifestList.Manifests) + if err != nil { + return "", 0, fmt.Errorf("Cannot deserialize manifest list: %v", err) + } + mediaType, p, err := deserializedManifestList.Payload() + logrus.Debugf("mediaType of manifestList: %s", mediaType) + if err != nil { + return "", 0, fmt.Errorf("Cannot retrieve payload for HTTP PUT of manifest list: %v", err) + + } + manifestLen := len(p) + putRequest, err := http.NewRequest("PUT", pushURL, bytes.NewReader(p)) + if err != nil { + return "", 0, fmt.Errorf("HTTP PUT request creation failed: %v", err) + } + putRequest.Header.Set("Content-Type", mediaType) + + httpClient, err := getHTTPClient(a, targetRepo, targetEndpoint, repoName) + if err != nil { + return "", 0, fmt.Errorf("Failed to setup HTTP client to repository: %v", err) + } + + // before we push the manifest list, if we have any blob mount requests, we need + // to ask the registry to mount those blobs in our target so they are available + // as references + if err := mountBlobs(httpClient, urlBuilder, targetRef, blobMountRequests); err != nil { + return "", 0, fmt.Errorf("Couldn't mount blobs for cross-repository push: %v", err) + } + + // we also must push any manifests that are referenced in the manifest list into + // the target namespace + if err := pushReferences(httpClient, urlBuilder, targetRef, manifestRequests); err != nil { + return "", 0, fmt.Errorf("Couldn't push manifests referenced in our manifest list: %v", err) + } + + resp, err := httpClient.Do(putRequest) + if err != nil { + return "", 0, fmt.Errorf("V2 registry PUT of manifest list failed: %v", err) + } + defer resp.Body.Close() + + var finalDigest string + if statusSuccess(resp.StatusCode) { + dgstHeader := resp.Header.Get("Docker-Content-Digest") + dgst, err := digest.Parse(dgstHeader) + if err != nil { + return "", 0, err + } + finalDigest = string(dgst) + } else { + return "", 0, fmt.Errorf("Registry push 
unsuccessful: response %d: %s", resp.StatusCode, resp.Status)
+	}
+	// if the YAML includes additional tags, push the added tag references. No other work
+	// should be required as we have already made sure all target blobs are cross-repo
+	// mounted and all referenced manifests are already pushed.
+	for _, tag := range yamlInput.Tags {
+		newRef, err := reference.WithTag(targetRef, tag)
+		if err != nil {
+			return "", 0, fmt.Errorf("Error creating tagged reference for added tag %q: %v", tag, err)
+		}
+		pushURL, err := createManifestURLFromRef(newRef, urlBuilder)
+		if err != nil {
+			return "", 0, fmt.Errorf("Error setting up repository endpoint and references for %q: %v", newRef, err)
+		}
+		logrus.Debugf("[extra tag %q] push url: %s", tag, pushURL)
+		putRequest, err := http.NewRequest("PUT", pushURL, bytes.NewReader(p))
+		if err != nil {
+			return "", 0, fmt.Errorf("[extra tag %q] HTTP PUT request creation failed: %v", tag, err)
+		}
+		putRequest.Header.Set("Content-Type", mediaType)
+		resp, err := httpClient.Do(putRequest)
+		if err != nil {
+			return "", 0, fmt.Errorf("[extra tag %q] V2 registry PUT of manifest list failed: %v", tag, err)
+		}
+		defer resp.Body.Close()
+
+		if statusSuccess(resp.StatusCode) {
+			dgstHeader := resp.Header.Get("Docker-Content-Digest")
+			dgst, err := digest.Parse(dgstHeader)
+			if err != nil {
+				return "", 0, err
+			}
+			if string(dgst) != finalDigest {
+				logrus.Warnf("Extra tag %q push resulted in non-matching digest %s (should be %s)", tag, string(dgst), finalDigest)
+			}
+		} else {
+			return "", 0, fmt.Errorf("[extra tag %q] Registry push unsuccessful: response %d: %s", tag, resp.StatusCode, resp.Status)
+		}
+	}
+	return finalDigest, manifestLen, nil
+}
+
+func getHTTPClient(a *types.AuthInfo, repoInfo *registry.RepositoryInfo, endpoint registry.APIEndpoint, repoName string) (*http.Client, error) {
+	// get the http transport, this will be used in a client to upload manifest
+	// TODO - add separate function to get client
+	base := &http.Transport{
+		Proxy: http.ProxyFromEnvironment,
+		Dial: (&net.Dialer{
+			Timeout:   30 * time.Second,
+			KeepAlive: 30 * time.Second,
+			DualStack: true,
+		}).Dial,
+		TLSHandshakeTimeout: 10 * time.Second,
+		TLSClientConfig:     endpoint.TLSConfig,
+		DisableKeepAlives:   true,
+	}
+	authConfig, err := getAuthConfig(a, repoInfo.Index)
+	if err != nil {
+		return nil, fmt.Errorf("Cannot retrieve authconfig: %v", err)
+	}
+	modifiers := registry.Headers(dockerversion.DockerUserAgent(nil), http.Header{})
+	authTransport := transport.NewTransport(base, modifiers...)
+	challengeManager, _, err := registry.PingV2Registry(endpoint.URL, authTransport)
+	if err != nil {
+		return nil, fmt.Errorf("Ping of V2 registry failed: %v", err)
+	}
+	if authConfig.RegistryToken != "" {
+		passThruTokenHandler := &existingTokenHandler{token: authConfig.RegistryToken}
+		modifiers = append(modifiers, auth.NewAuthorizer(challengeManager, passThruTokenHandler))
+	} else {
+		creds := dumbCredentialStore{auth: &authConfig}
+		tokenHandler := auth.NewTokenHandler(authTransport, creds, repoName, "push", "pull")
+		basicHandler := auth.NewBasicHandler(creds)
+		modifiers = append(modifiers, auth.NewAuthorizer(challengeManager, tokenHandler, basicHandler))
+	}
+	tr := transport.NewTransport(base, modifiers...)
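+	// (The client below therefore sends every request through the auth-aware
+	// transport: either the pass-through registry token, or the token/basic-auth
+	// handlers negotiated against the registry's challenge manager above.)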
+ + httpClient := &http.Client{ + Transport: tr, + CheckRedirect: checkHTTPRedirect, + } + return httpClient, nil +} + +func createManifestURLFromRef(targetRef reference.Named, urlBuilder *v2.URLBuilder) (string, error) { + // get rid of hostname so the target URL is constructed properly + hostname, name := splitHostname(targetRef.String()) + targetRef, err := getNamedRefWithoutHostname(name) + if err != nil { + return "", fmt.Errorf("Can't parse target image repository name from reference: %v", err) + } + + // Set the tag to latest, if no tag found in YAML + if _, isTagged := targetRef.(reference.NamedTagged); !isTagged { + targetRef = reference.TagNameOnly(targetRef) + } else { + tagged, _ := targetRef.(reference.NamedTagged) + targetRef, err = reference.WithTag(targetRef, tagged.Tag()) + if err != nil { + return "", fmt.Errorf("Error referencing specified tag to target repository name: %v", err) + } + } + + manifestURL, err := buildManifestURL(urlBuilder, hostname, targetRef) + if err != nil { + return "", fmt.Errorf("Failed to build manifest URL from target reference: %v", err) + } + return manifestURL, nil +} + +func setupRepo(repoInfo *registry.RepositoryInfo, insecure bool) (registry.APIEndpoint, string, error) { + + options := registry.ServiceOptions{} + if insecure { + options.InsecureRegistries = append(options.InsecureRegistries, reference.Domain(repoInfo.Name)) + } + registryService, err := registry.NewService(options) + if err != nil { + return registry.APIEndpoint{}, "", err + } + + endpoints, err := registryService.LookupPushEndpoints(reference.Domain(repoInfo.Name)) + if err != nil { + return registry.APIEndpoint{}, "", err + } + logrus.Debugf("endpoints: %v", endpoints) + // take highest priority endpoint + endpoint := endpoints[0] + // if insecure, and there is an "http" endpoint, prefer that + if insecure { + for _, ep := range endpoints { + if ep.URL.Scheme == "http" { + endpoint = ep + } + } + endpoint.TLSConfig.InsecureSkipVerify = true + } + + repoName := repoInfo.Name.Name() + // If endpoint does not support CanonicalName, use the Name's path instead + if endpoint.TrimHostname { + repoName = reference.Path(repoInfo.Name) + logrus.Debugf("repoName: %v", repoName) + } + return endpoint, repoName, nil +} + +func pushReferences(httpClient *http.Client, urlBuilder *v2.URLBuilder, ref reference.Named, manifests []manifestPush) error { + // for each referenced manifest object in the manifest list (that is outside of our current repo/name) + // we need to push by digest the manifest so that it is added as a valid reference in the current + // repo. This will allow us to push the manifest list properly later and have all valid references. 
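+	//
+	// For illustration, each iteration below issues a manifest put per the
+	// registry V2 API:
+	//   PUT /v2/<name>/manifests/<digest>
+	//   Content-Type: <manifest media type>
+	// with the referenced manifest's canonical JSON as the request body.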
+
+	// first get rid of possible hostname so the target URL is constructed properly
+	hostname, name := splitHostname(ref.String())
+	ref, err := getNamedRefWithoutHostname(name)
+	if err != nil {
+		return fmt.Errorf("Error parsing repo/name portion of reference without hostname: %s: %v", name, err)
+	}
+	for _, manifest := range manifests {
+		dgst, err := digest.Parse(manifest.Digest)
+		if err != nil {
+			return fmt.Errorf("Error parsing manifest digest (%s) for referenced manifest %q: %v", manifest.Digest, manifest.Name, err)
+		}
+		targetRef, err := reference.WithDigest(ref, dgst)
+		if err != nil {
+			return fmt.Errorf("Error creating manifest digest target for referenced manifest %q: %v", manifest.Name, err)
+		}
+		pushURL, err := buildManifestURL(urlBuilder, hostname, targetRef)
+		if err != nil {
+			return fmt.Errorf("Error setting up manifest push URL for manifest references for %q: %v", manifest.Name, err)
+		}
+		logrus.Debugf("manifest reference push URL: %s", pushURL)
+
+		pushRequest, err := http.NewRequest("PUT", pushURL, bytes.NewReader(manifest.JSONBytes))
+		if err != nil {
+			return fmt.Errorf("HTTP PUT request creation for manifest reference push failed: %v", err)
+		}
+		pushRequest.Header.Set("Content-Type", manifest.MediaType)
+		resp, err := httpClient.Do(pushRequest)
+		if err != nil {
+			return fmt.Errorf("PUT of manifest reference failed: %v", err)
+		}
+
+		resp.Body.Close()
+		if !statusSuccess(resp.StatusCode) {
+			return fmt.Errorf("Referenced manifest push unsuccessful: response %d: %s", resp.StatusCode, resp.Status)
+		}
+		dgstHeader := resp.Header.Get("Docker-Content-Digest")
+		dgstResult, err := digest.Parse(dgstHeader)
+		if err != nil {
+			return fmt.Errorf("Couldn't parse pushed manifest digest response: %v", err)
+		}
+		if string(dgstResult) != manifest.Digest {
+			return fmt.Errorf("Pushed referenced manifest received a different digest: expected %s, got %s", manifest.Digest, string(dgstResult))
+		}
+		logrus.Debugf("referenced manifest %q pushed; digest matches: %s", manifest.Name, string(dgst))
+	}
+	return nil
+}
+
+func mountBlobs(httpClient *http.Client, urlBuilder *v2.URLBuilder, ref reference.Named, blobsRequested []blobMount) error {
+	// get rid of hostname so the target URL is constructed properly
+	hostname, name := splitHostname(ref.String())
+	targetRef, err := getNamedRefWithoutHostname(name)
+	if err != nil {
+		return fmt.Errorf("Can't parse reference without hostname: %v", err)
+	}
+
+	for _, blob := range blobsRequested {
+		// create URL request
+		url, err := buildBlobUploadURL(urlBuilder, hostname, targetRef, url.Values{"from": {blob.FromRepo}, "mount": {blob.Digest}})
+		if err != nil {
+			return fmt.Errorf("Failed to create blob mount URL: %v", err)
+		}
+		mountRequest, err := http.NewRequest("POST", url, nil)
+		if err != nil {
+			return fmt.Errorf("HTTP POST request creation for blob mount failed: %v", err)
+		}
+		mountRequest.Header.Set("Content-Length", "0")
+		resp, err := httpClient.Do(mountRequest)
+		if err != nil {
+			return fmt.Errorf("V2 registry POST of blob mount failed: %v", err)
+		}
+
+		resp.Body.Close()
+		if !statusSuccess(resp.StatusCode) {
+			return fmt.Errorf("Blob mount failed to url %s: HTTP status %d", url, resp.StatusCode)
+		}
+		logrus.Debugf("Mount of blob %s succeeded, location: %q", blob.Digest, resp.Header.Get("Location"))
+	}
+	return nil
+}
+
+func buildManifestURL(ub *v2.URLBuilder, hostname string, targetRef reference.Named) (string, error) {
+	if !isHubLibraryRef(targetRef, hostname) {
+		return ub.BuildManifestURL(targetRef)
+	}
+	// this is
a library reference and we don't want to lose the "library/" part of the URL ref + baseURL, err := ub.BuildBaseURL() + if err != nil { + return "", err + } + tagOrDigest := "" + switch v := targetRef.(type) { + case reference.Tagged: + tagOrDigest = v.Tag() + case reference.Digested: + tagOrDigest = v.Digest().String() + } + baseURL = fmt.Sprintf("%s%s/%s/%s", baseURL, reference.Path(targetRef), "manifests", tagOrDigest) + return baseURL, nil +} + +func buildBlobUploadURL(ub *v2.URLBuilder, hostname string, targetRef reference.Named, values url.Values) (string, error) { + if !isHubLibraryRef(targetRef, hostname) { + return ub.BuildBlobUploadURL(targetRef, values) + } + // this is a library reference and we don't want to lose the "library/" part of the URL ref + baseURL, err := ub.BuildBaseURL() + if err != nil { + return "", err + } + baseURL = fmt.Sprintf("%s%s/%s", baseURL, reference.Path(targetRef), "blobs/uploads/") + return appendValues(baseURL, values), nil +} + +func isHubLibraryRef(targetRef reference.Named, hostname string) bool { + return strings.HasPrefix(reference.Path(targetRef), DefaultRepoPrefix) && hostname == DefaultHostname +} + +func getNamedRefWithoutHostname(ref string) (reference.Named, error) { + targetRef, err := reference.Parse(ref) + if err != nil { + return nil, fmt.Errorf("Can't parse reference without hostname: %v", err) + } + named, isNamed := targetRef.(reference.Named) + if !isNamed { + return nil, fmt.Errorf("Parsed reference is not a Named object: %s", ref) + } + return named, nil +} + +// NOTE: these two functions are copied from github.com/docker/distribution/registry/api/v2/urls.go +// to handle the issue of needing to preserve non-normalized names for pushing to "library/" on +// DockerHub +// +// appendValuesURL appends the parameters to the url. +func appendValuesURL(u *url.URL, values ...url.Values) *url.URL { + merged := u.Query() + + for _, v := range values { + for k, vv := range v { + merged[k] = append(merged[k], vv...) + } + } + u.RawQuery = merged.Encode() + return u +} + +// appendValues appends the parameters to the url. Panics if the string is not +// a url. 
+func appendValues(u string, values ...url.Values) string { + up, err := url.Parse(u) + + if err != nil { + panic(err) // should never happen + } + + return appendValuesURL(up, values...).String() +} + +func statusSuccess(status int) bool { + return status >= 200 && status <= 399 +} diff --git a/src/cmd/linuxkit/vendor/github.com/estesp/manifest-tool/docker/inspect.go b/src/cmd/linuxkit/vendor/github.com/estesp/manifest-tool/docker/inspect.go new file mode 100644 index 000000000..939c97af1 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/estesp/manifest-tool/docker/inspect.go @@ -0,0 +1,440 @@ +package docker + +import ( + "encoding/json" + "errors" + "fmt" + "net/http" + "net/url" + "strings" + "syscall" + "time" + + "github.com/docker/cli/cli/config" + "github.com/docker/distribution/manifest/manifestlist" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/api/errcode" + v2 "github.com/docker/distribution/registry/api/v2" + "github.com/docker/distribution/registry/client" + "github.com/docker/docker/api" + engineTypes "github.com/docker/docker/api/types" + registryTypes "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/distribution" + "github.com/docker/docker/image" + "github.com/docker/docker/registry" + "github.com/estesp/manifest-tool/types" + "github.com/sirupsen/logrus" + "golang.org/x/net/context" +) + +const ( + // DefaultHostname is the default built-in registry (DockerHub) + DefaultHostname = "docker.io" + // LegacyDefaultHostname is the old hostname used for DockerHub + LegacyDefaultHostname = "index.docker.io" + // DefaultRepoPrefix is the prefix used for official images in DockerHub + DefaultRepoPrefix = "library/" +) + +type existingTokenHandler struct { + token string +} + +type dumbCredentialStore struct { + auth *engineTypes.AuthConfig +} + +func (dcs dumbCredentialStore) Basic(*url.URL) (string, string) { + return dcs.auth.Username, dcs.auth.Password +} + +func (th *existingTokenHandler) Scheme() string { + return "bearer" +} + +func (th *existingTokenHandler) AuthorizeRequest(req *http.Request, params map[string]string) error { + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", th.token)) + return nil +} + +func (dcs dumbCredentialStore) RefreshToken(*url.URL, string) string { + return dcs.auth.IdentityToken +} + +func (dcs dumbCredentialStore) SetRefreshToken(*url.URL, string, string) { +} + +// fallbackError wraps an error that can possibly allow fallback to a different +// endpoint. +type fallbackError struct { + // err is the error being wrapped. + err error + // confirmedV2 is set to true if it was confirmed that the registry + // supports the v2 protocol. This is used to limit fallbacks to the v1 + // protocol. + confirmedV2 bool + transportOK bool +} + +// Error renders the FallbackError as a string. +func (f fallbackError) Error() string { + return f.err.Error() +} + +type manifestFetcher interface { + Fetch(ctx context.Context, ref reference.Named) ([]types.ImageInspect, error) +} + +func validateName(name string) error { + distref, err := reference.ParseNormalizedNamed(name) + if err != nil { + return err + } + hostname, _ := splitHostname(distref.String()) + if hostname == "" { + return fmt.Errorf("Please use a fully qualified repository name") + } + return nil +} + +// splitHostname splits a repository name to hostname and remotename string. +// If no valid hostname is found, the default hostname is used. 
Repository name +// needs to be already validated before. +func splitHostname(name string) (hostname, remoteName string) { + i := strings.IndexRune(name, '/') + if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost") { + hostname, remoteName = DefaultHostname, name + } else { + hostname, remoteName = name[:i], name[i+1:] + } + if hostname == LegacyDefaultHostname { + hostname = DefaultHostname + } + if hostname == DefaultHostname && !strings.ContainsRune(remoteName, '/') { + remoteName = DefaultRepoPrefix + remoteName + } + return +} + +func checkHTTPRedirect(req *http.Request, via []*http.Request) error { + if len(via) >= 10 { + return errors.New("stopped after 10 redirects") + } + + if len(via) > 0 { + for headerName, headerVals := range via[0].Header { + if headerName != "Accept" && headerName != "Range" { + continue + } + for _, val := range headerVals { + // Don't add to redirected request if redirected + // request already has a header with the same + // name and value. + hasValue := false + for _, existingVal := range req.Header[headerName] { + if existingVal == val { + hasValue = true + break + } + } + if !hasValue { + req.Header.Add(headerName, val) + } + } + } + } + + return nil +} + +// GetImageData takes registry authentication information and a name of the image to return information about +func GetImageData(a *types.AuthInfo, name string, insecure, includeTags bool) ([]types.ImageInspect, *registry.RepositoryInfo, error) { + if err := validateName(name); err != nil { + return nil, nil, err + } + ref, err := reference.ParseNormalizedNamed(name) + if err != nil { + return nil, nil, err + } + repoInfo, err := registry.ParseRepositoryInfo(ref) + if err != nil { + return nil, nil, err + } + authConfig, err := getAuthConfig(a, repoInfo.Index) + if err != nil { + return nil, nil, err + } + if err := validateRepoName(repoInfo.Name.Name()); err != nil { + return nil, nil, err + } + options := registry.ServiceOptions{} + if insecure { + options.InsecureRegistries = append(options.InsecureRegistries, reference.Domain(repoInfo.Name)) + } + registryService, err := registry.NewService(options) + if err != nil { + return nil, nil, err + } + + endpoints, err := registryService.LookupPullEndpoints(reference.Domain(repoInfo.Name)) + if err != nil { + return nil, nil, err + } + logrus.Debugf("endpoints: %v", endpoints) + + var ( + ctx = context.Background() + lastErr error + discardNoSupportErrors bool + foundImages []types.ImageInspect + confirmedV2 bool + confirmedTLSRegistries = make(map[string]struct{}) + ) + + for _, endpoint := range endpoints { + // make sure I can reach the registry, same as docker pull does + if endpoint.Version == registry.APIVersion1 { + logrus.Debugf("Skipping v1 endpoint %s; manifest list requires v2", endpoint.URL) + continue + } + if insecure && endpoint.URL.Scheme == "https" { + logrus.Debugf("Skipping https endpoint for insecure registry") + continue + } + + if endpoint.URL.Scheme != "https" { + if _, confirmedTLS := confirmedTLSRegistries[endpoint.URL.Host]; confirmedTLS { + logrus.Debugf("Skipping non-TLS endpoint %s for host/port that appears to use TLS", endpoint.URL) + continue + } + } + if insecure { + endpoint.TLSConfig.InsecureSkipVerify = true + } + + logrus.Debugf("Trying to fetch image manifest of %s repository from %s %s", repoInfo.Name.Name(), endpoint.URL, endpoint.Version) + + fetcher, err := newManifestFetcher(endpoint, repoInfo, authConfig, registryService, includeTags) + if err != nil { + lastErr = err + continue + } + 
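+		// Added commentary: the Fetch below either succeeds or returns a
+		// fallbackError recording whether this endpoint confirmed v2 support
+		// and whether its transport was reachable over TLS, which drives the
+		// decision to try the next endpoint in priority order.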
+ if foundImages, err = fetcher.Fetch(ctx, ref); err != nil { + // Was this fetch cancelled? If so, don't try to fall back. + fallback := false + select { + case <-ctx.Done(): + default: + if fallbackErr, ok := err.(fallbackError); ok { + fallback = true + confirmedV2 = confirmedV2 || fallbackErr.confirmedV2 + if fallbackErr.transportOK && endpoint.URL.Scheme == "https" { + confirmedTLSRegistries[endpoint.URL.Host] = struct{}{} + } + err = fallbackErr.err + } + } + if fallback { + if _, ok := err.(distribution.ErrNoSupport); !ok { + // Because we found an error that's not ErrNoSupport, discard all subsequent ErrNoSupport errors. + discardNoSupportErrors = true + // save the current error + lastErr = err + } else if !discardNoSupportErrors { + // Save the ErrNoSupport error, because it's either the first error or all encountered errors + // were also ErrNoSupport errors. + lastErr = err + } + continue + } + logrus.Infof("Not continuing with pull after error: %v", err) + return nil, nil, err + } + + return foundImages, repoInfo, nil + } + + if lastErr == nil { + lastErr = fmt.Errorf("no endpoints found for %s", ref.String()) + } + + return nil, nil, lastErr +} + +func newManifestFetcher(endpoint registry.APIEndpoint, repoInfo *registry.RepositoryInfo, authConfig engineTypes.AuthConfig, registryService registry.Service, includeTags bool) (manifestFetcher, error) { + switch endpoint.Version { + case registry.APIVersion2: + return &v2ManifestFetcher{ + endpoint: endpoint, + authConfig: authConfig, + service: registryService, + repoInfo: repoInfo, + includeTags: includeTags, + }, nil + case registry.APIVersion1: + return &v1ManifestFetcher{ + endpoint: endpoint, + authConfig: authConfig, + service: registryService, + repoInfo: repoInfo, + }, nil + } + return nil, fmt.Errorf("unknown version %d for registry %s", endpoint.Version, endpoint.URL) +} + +func getAuthConfig(a *types.AuthInfo, index *registryTypes.IndexInfo) (engineTypes.AuthConfig, error) { + + var ( + username = a.Username + password = a.Password + cfg = a.DockerCfg + defAuthConfig = engineTypes.AuthConfig{ + Username: a.Username, + Password: a.Password, + Email: "stub@example.com", + } + ) + + if username != "" && password != "" { + return defAuthConfig, nil + } + + confFile, err := config.Load(cfg) + if err != nil { + return engineTypes.AuthConfig{}, err + } + authConfig := registry.ResolveAuthConfig(confFile.AuthConfigs, index) + logrus.Debugf("authConfig for %s: %v", index.Name, authConfig.Username) + + return authConfig, nil +} + +func validateRepoName(name string) error { + if name == "" { + return fmt.Errorf("Repository name can't be empty") + } + if name == api.NoBaseImageSpecifier { + return fmt.Errorf("'%s' is a reserved name", api.NoBaseImageSpecifier) + } + return nil +} + +func makeImageInspect(img *image.Image, tag string, mfInfo manifestInfo, mediaType string, tagList []string) *types.ImageInspect { + var digest string + if err := mfInfo.digest.Validate(); err == nil { + digest = mfInfo.digest.String() + } + + // for manifest lists, we only want to display the basic info that this is + // a manifest list and its digest information: + if mediaType == manifestlist.MediaTypeManifestList { + return &types.ImageInspect{ + MediaType: mediaType, + Digest: digest, + } + } + + var digests []string + for _, blobDigest := range mfInfo.blobDigests { + digests = append(digests, blobDigest.String()) + } + return &types.ImageInspect{ + Size: mfInfo.length, + MediaType: mediaType, + Tag: tag, + Digest: digest, + RepoTags: tagList, + 
Comment: img.Comment, + Created: img.Created.Format(time.RFC3339Nano), + ContainerConfig: &img.ContainerConfig, + DockerVersion: img.DockerVersion, + Author: img.Author, + Config: img.Config, + Architecture: img.Architecture, + Os: img.OS, + OSVersion: img.OSVersion, + OSFeatures: img.OSFeatures, + References: digests, + Layers: mfInfo.layers, + Platform: mfInfo.platform, + CanonicalJSON: mfInfo.jsonBytes, + } +} + +func makeRawConfigFromV1Config(imageJSON []byte, rootfs *image.RootFS, history []image.History) (map[string]*json.RawMessage, error) { + var dver struct { + DockerVersion string `json:"docker_version"` + } + + if err := json.Unmarshal(imageJSON, &dver); err != nil { + return nil, err + } + + useFallback := versions.LessThan(dver.DockerVersion, "1.8.3") + + if useFallback { + var v1Image image.V1Image + err := json.Unmarshal(imageJSON, &v1Image) + if err != nil { + return nil, err + } + imageJSON, err = json.Marshal(v1Image) + if err != nil { + return nil, err + } + } + + var c map[string]*json.RawMessage + if err := json.Unmarshal(imageJSON, &c); err != nil { + return nil, err + } + + c["rootfs"] = rawJSON(rootfs) + c["history"] = rawJSON(history) + + return c, nil +} + +func rawJSON(value interface{}) *json.RawMessage { + jsonval, err := json.Marshal(value) + if err != nil { + return nil + } + return (*json.RawMessage)(&jsonval) +} + +func continueOnError(err error) bool { + switch v := err.(type) { + case errcode.Errors: + if len(v) == 0 { + return true + } + return continueOnError(v[0]) + case distribution.ErrNoSupport: + return continueOnError(v.Err) + case errcode.Error: + return shouldV2Fallback(v) + case *client.UnexpectedHTTPResponseError: + return true + case ImageConfigPullError: + return false + case error: + return !strings.Contains(err.Error(), strings.ToLower(syscall.ENOSPC.Error())) + } + // let's be nice and fallback if the error is a completely + // unexpected one. + // If new errors have to be handled in some way, please + // add them to the switch above. + return true +} + +// shouldV2Fallback returns true if this error is a reason to fall back to v1. +func shouldV2Fallback(err errcode.Error) bool { + switch err.Code { + case errcode.ErrorCodeUnauthorized, v2.ErrorCodeManifestUnknown, v2.ErrorCodeNameUnknown: + return true + } + return false +} diff --git a/src/cmd/linuxkit/vendor/github.com/estesp/manifest-tool/docker/inspect_v1.go b/src/cmd/linuxkit/vendor/github.com/estesp/manifest-tool/docker/inspect_v1.go new file mode 100644 index 000000000..70b7537a1 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/estesp/manifest-tool/docker/inspect_v1.go @@ -0,0 +1,173 @@ +package docker + +import ( + "encoding/json" + "errors" + "fmt" + "strings" + + "github.com/docker/distribution" + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/client/transport" + engineTypes "github.com/docker/docker/api/types" + dockerdistribution "github.com/docker/docker/distribution" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/image" + "github.com/docker/docker/image/v1" + "github.com/docker/docker/registry" + "github.com/estesp/manifest-tool/types" + "github.com/sirupsen/logrus" + "golang.org/x/net/context" +) + +type v1ManifestFetcher struct { + endpoint registry.APIEndpoint + repoInfo *registry.RepositoryInfo + repo distribution.Repository + confirmedV2 bool + // wrap in a config? 
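+	// (added note: i.e. the auth/service/session fields below could be
+	// grouped into a fetcher config struct)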
+ authConfig engineTypes.AuthConfig + service registry.Service + session *registry.Session +} + +func (mf *v1ManifestFetcher) Fetch(ctx context.Context, ref reference.Named) ([]types.ImageInspect, error) { + if _, isCanonical := ref.(reference.Canonical); isCanonical { + // Allowing fallback, because HTTPS v1 is before HTTP v2 + return nil, fallbackError{ + err: dockerdistribution.ErrNoSupport{Err: errors.New("Cannot pull by digest with v1 registry")}, + } + } + tlsConfig, err := mf.service.TLSConfig(mf.repoInfo.Index.Name) + if err != nil { + return nil, err + } + // Adds Docker-specific headers as well as user-specified headers (metaHeaders) + tr := transport.NewTransport( + registry.NewTransport(tlsConfig), + //registry.Headers(mf.config.MetaHeaders)..., + registry.Headers(dockerversion.DockerUserAgent(nil), nil)..., + ) + client := registry.HTTPClient(tr) + //v1Endpoint := mf.endpoint.ToV1Endpoint(mf.config.MetaHeaders) + v1Endpoint := mf.endpoint.ToV1Endpoint(dockerversion.DockerUserAgent(nil), nil) + mf.session, err = registry.NewSession(client, &mf.authConfig, v1Endpoint) + if err != nil { + logrus.Debugf("Fallback from error: %s", err) + return nil, fallbackError{err: err} + } + + imgsInspect, err := mf.fetchWithSession(ctx, ref) + if err != nil { + return nil, err + } + if len(imgsInspect) > 1 { + return nil, fmt.Errorf("Found more than one image in V1 fetch!? %v", imgsInspect) + } + imgsInspect[0].MediaType = schema1.MediaTypeManifest + return imgsInspect, nil +} + +func (mf *v1ManifestFetcher) fetchWithSession(ctx context.Context, ref reference.Named) ([]types.ImageInspect, error) { + var ( + imageList = []types.ImageInspect{} + pulledImg *image.Image + ) + repoData, err := mf.session.GetRepositoryData(mf.repoInfo.Name) + if err != nil { + if strings.Contains(err.Error(), "HTTP code: 404") { + return nil, fmt.Errorf("Error: image %s not found", reference.Path(mf.repoInfo.Name)) + } + // Unexpected HTTP error + return nil, err + } + + var tagsList map[string]string + tagsList, err = mf.session.GetRemoteTags(repoData.Endpoints, mf.repoInfo.Name) + if err != nil { + logrus.Errorf("unable to get remote tags: %s", err) + return nil, err + } + + logrus.Debugf("Retrieving the tag list") + tagged, isTagged := ref.(reference.NamedTagged) + var tagID, tag string + if isTagged { + tag = tagged.Tag() + tagsList[tagged.Tag()] = tagID + } else { + ref = reference.TagNameOnly(ref) + tagged, _ := ref.(reference.NamedTagged) + tag = tagged.Tag() + tagsList[tagged.Tag()] = tagID + } + tagID, err = mf.session.GetRemoteTag(repoData.Endpoints, mf.repoInfo.Name, tag) + if err == registry.ErrRepoNotFound { + return nil, fmt.Errorf("Tag %s not found in repository %s", tag, mf.repoInfo.Name.Name()) + } + if err != nil { + logrus.Errorf("unable to get remote tags: %s", err) + return nil, err + } + + tagList := []string{} + for tag := range tagsList { + tagList = append(tagList, tag) + } + + img := repoData.ImgList[tagID] + + for _, ep := range mf.repoInfo.Index.Mirrors { + if pulledImg, err = mf.pullImageJSON(img.ID, ep); err != nil { + // Don't report errors when pulling from mirrors. + logrus.Debugf("Error pulling image json of %s:%s, mirror: %s, %s", mf.repoInfo.Name.Name(), img.Tag, ep, err) + continue + } + break + } + if pulledImg == nil { + for _, ep := range repoData.Endpoints { + if pulledImg, err = mf.pullImageJSON(img.ID, ep); err != nil { + // It's not ideal that only the last error is returned, it would be better to concatenate the errors. 
+ logrus.Infof("Error pulling image json of %s:%s, endpoint: %s, %v", mf.repoInfo.Name.Name(), img.Tag, ep, err) + continue + } + break + } + } + if err != nil { + return nil, fmt.Errorf("Error pulling image (%s) from %s, %v", img.Tag, mf.repoInfo.Name.Name(), err) + } + if pulledImg == nil { + return nil, fmt.Errorf("No such image %s:%s", mf.repoInfo.Name.Name(), tag) + } + + imageInsp := makeImageInspect(pulledImg, tag, manifestInfo{}, schema1.MediaTypeManifest, tagList) + imageList = append(imageList, *imageInsp) + return imageList, nil +} + +func (mf *v1ManifestFetcher) pullImageJSON(imgID, endpoint string) (*image.Image, error) { + imgJSON, _, err := mf.session.GetRemoteImageJSON(imgID, endpoint) + if err != nil { + return nil, err + } + h, err := v1.HistoryFromConfig(imgJSON, false) + if err != nil { + return nil, err + } + configRaw, err := makeRawConfigFromV1Config(imgJSON, image.NewRootFS(), []image.History{h}) + if err != nil { + return nil, err + } + config, err := json.Marshal(configRaw) + if err != nil { + return nil, err + } + img, err := image.NewFromJSON(config) + if err != nil { + return nil, err + } + return img, nil +} diff --git a/src/cmd/linuxkit/vendor/github.com/estesp/manifest-tool/docker/inspect_v2.go b/src/cmd/linuxkit/vendor/github.com/estesp/manifest-tool/docker/inspect_v2.go new file mode 100644 index 000000000..fde3ca193 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/estesp/manifest-tool/docker/inspect_v2.go @@ -0,0 +1,533 @@ +package docker + +import ( + "encoding/json" + "errors" + "fmt" + "runtime" + "strings" + + "github.com/docker/distribution" + "github.com/docker/distribution/manifest/manifestlist" + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/manifest/schema2" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/api/errcode" + engineTypes "github.com/docker/docker/api/types" + dockerdistribution "github.com/docker/docker/distribution" + "github.com/docker/docker/image" + "github.com/docker/docker/image/v1" + "github.com/docker/docker/registry" + "github.com/estesp/manifest-tool/types" + "github.com/opencontainers/go-digest" + "github.com/sirupsen/logrus" + "golang.org/x/net/context" +) + +type v2ManifestFetcher struct { + endpoint registry.APIEndpoint + repoInfo *registry.RepositoryInfo + repo distribution.Repository + confirmedV2 bool + includeTags bool + authConfig engineTypes.AuthConfig + service registry.Service +} + +type manifestInfo struct { + blobDigests []digest.Digest + layers []string + digest digest.Digest + platform manifestlist.PlatformSpec + length int64 + jsonBytes []byte +} + +func (mf *v2ManifestFetcher) Fetch(ctx context.Context, ref reference.Named) ([]types.ImageInspect, error) { + var err error + + mf.repo, mf.confirmedV2, err = dockerdistribution.NewV2Repository(ctx, mf.repoInfo, mf.endpoint, nil, &mf.authConfig, "pull") + if err != nil { + logrus.Debugf("Error getting v2 registry: %v", err) + return nil, err + } + + images, err := mf.fetchWithRepository(ctx, ref) + if err != nil { + if _, ok := err.(fallbackError); ok { + return nil, err + } + if continueOnError(err) { + logrus.Errorf("Error trying v2 registry: %v", err) + return nil, fallbackError{err: err, confirmedV2: mf.confirmedV2, transportOK: true} + } + } + for _, img := range images { + img.MediaType = schema2.MediaTypeManifest + } + return images, err +} + +func (mf *v2ManifestFetcher) fetchWithRepository(ctx context.Context, ref reference.Named) ([]types.ImageInspect, error) { + var ( + 
manifest    distribution.Manifest
+		tagOrDigest string // Used for logging/progress only
+		tagList     []string
+		imageList   = []types.ImageInspect{}
+	)
+
+	manSvc, err := mf.repo.Manifests(ctx)
+	if err != nil {
+		return nil, err
+	}
+	ref = reference.TagNameOnly(ref)
+
+	if tagged, isTagged := ref.(reference.NamedTagged); isTagged {
+		// NOTE: not using TagService.Get, since it uses HEAD requests
+		// against the manifests endpoint, which are not supported by
+		// all registry versions.
+		manifest, err = manSvc.Get(ctx, "", distribution.WithTag(tagged.Tag()))
+		if err != nil {
+			return nil, allowV1Fallback(err)
+		}
+		tagOrDigest = tagged.Tag()
+	} else if digested, isDigested := ref.(reference.Canonical); isDigested {
+		manifest, err = manSvc.Get(ctx, digested.Digest())
+		if err != nil {
+			return nil, err
+		}
+		tagOrDigest = digested.Digest().String()
+	} else {
+		return nil, fmt.Errorf("internal error: reference has neither a tag nor a digest: %s", ref.String())
+	}
+
+	if manifest == nil {
+		return nil, fmt.Errorf("image manifest does not exist for tag or digest %q", tagOrDigest)
+	}
+
+	// If manSvc.Get succeeded, we can be confident that the registry on
+	// the other side speaks the v2 protocol.
+	mf.confirmedV2 = true
+
+	if mf.includeTags {
+		tagList, err = mf.repo.Tags(ctx).All(ctx)
+		if err != nil {
+			// If this repository doesn't exist on V2, we should
+			// permit a fallback to V1.
+			if !strings.Contains(err.Error(), "unauthorized") {
+				// only error out if the "list all tags" endpoint isn't blocked by the registry
+				// some registries may have a reason to not allow complete tag list queries
+				return nil, allowV1Fallback(err)
+			}
+		}
+	}
+
+	var (
+		images    []*image.Image
+		mfInfos   []manifestInfo
+		mediaType []string
+	)
+
+	switch v := manifest.(type) {
+	case *schema1.SignedManifest:
+		image, mfInfo, err := mf.pullSchema1(ctx, ref, v)
+		images = append(images, image)
+		mfInfos = append(mfInfos, mfInfo)
+		mediaType = append(mediaType, schema1.MediaTypeManifest)
+		if err != nil {
+			return nil, err
+		}
+	case *schema2.DeserializedManifest:
+		image, mfInfo, err := mf.pullSchema2(ctx, ref, v)
+		images = append(images, image)
+		mfInfos = append(mfInfos, mfInfo)
+		mediaType = append(mediaType, schema2.MediaTypeManifest)
+		if err != nil {
+			return nil, err
+		}
+	case *manifestlist.DeserializedManifestList:
+		images, mfInfos, mediaType, err = mf.pullManifestList(ctx, ref, v)
+		if err != nil {
+			return nil, err
+		}
+	default:
+		return nil, errors.New("unsupported manifest format")
+	}
+
+	for idx, img := range images {
+		imgReturn := makeImageInspect(img, tagOrDigest, mfInfos[idx], mediaType[idx], tagList)
+		imageList = append(imageList, *imgReturn)
+	}
+	return imageList, nil
+}
+
+func (mf *v2ManifestFetcher) pullSchema1(ctx context.Context, ref reference.Named, unverifiedManifest *schema1.SignedManifest) (img *image.Image, mfInfo manifestInfo, err error) {
+	mfInfo = manifestInfo{}
+	var verifiedManifest *schema1.Manifest
+	verifiedManifest, err = verifySchema1Manifest(unverifiedManifest, ref)
+	if err != nil {
+		return nil, mfInfo, err
+	}
+
+	// remove duplicate layers and check parent chain validity
+	err = fixManifestLayers(verifiedManifest)
+	if err != nil {
+		return nil, mfInfo, err
+	}
+
+	// Image history converted to the new format
+	var history []image.History
+
+	// Note that the order of this loop is in the direction of bottom-most
+	// to top-most, so that the downloads slice gets ordered correctly.
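+	// (In a schema1 manifest, FSLayers[0] is the most recent layer, so walking
+	// from len-1 down to 0 rebuilds the history oldest-first.)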
+ for i := len(verifiedManifest.FSLayers) - 1; i >= 0; i-- { + var throwAway struct { + ThrowAway bool `json:"throwaway,omitempty"` + } + if err := json.Unmarshal([]byte(verifiedManifest.History[i].V1Compatibility), &throwAway); err != nil { + return nil, mfInfo, err + } + + h, err := v1.HistoryFromConfig([]byte(verifiedManifest.History[i].V1Compatibility), throwAway.ThrowAway) + if err != nil { + return nil, mfInfo, err + } + history = append(history, h) + mfInfo.blobDigests = append(mfInfo.blobDigests, verifiedManifest.FSLayers[i].BlobSum) + } + + seen := make(map[string]bool) + for i := 0; i < len(verifiedManifest.FSLayers); i++ { + digest := verifiedManifest.FSLayers[i].BlobSum.String() + if _, ok := seen[digest]; ok { + continue + } + seen[digest] = true + mfInfo.layers = append(mfInfo.layers, digest) + } + + rootFS := image.NewRootFS() + configRaw, _ := makeRawConfigFromV1Config([]byte(verifiedManifest.History[0].V1Compatibility), rootFS, history) + + config, err := json.Marshal(configRaw) + if err != nil { + return nil, mfInfo, err + } + + img, err = image.NewFromJSON(config) + if err != nil { + return nil, mfInfo, err + } + + mfInfo.digest = digest.FromBytes(unverifiedManifest.Canonical) + // add the size of the manifest to the info struct; needed for assembling proper + // manifest lists + mfInfo.length = int64(len(unverifiedManifest.Canonical)) + mfInfo.jsonBytes, err = unverifiedManifest.MarshalJSON() + if err != nil { + return nil, mfInfo, err + } + return img, mfInfo, nil +} + +func verifySchema1Manifest(signedManifest *schema1.SignedManifest, ref reference.Named) (m *schema1.Manifest, err error) { + // If pull by digest, then verify the manifest digest. NOTE: It is + // important to do this first, before any other content validation. If the + // digest cannot be verified, don't even bother with those other things. + if digested, isCanonical := ref.(reference.Canonical); isCanonical { + verifier := digested.Digest().Verifier() + if _, err := verifier.Write(signedManifest.Canonical); err != nil { + return nil, err + } + if !verifier.Verified() { + err := fmt.Errorf("image verification failed for digest %s", digested.Digest()) + logrus.Error(err) + return nil, err + } + } + m = &signedManifest.Manifest + + if m.SchemaVersion != 1 { + return nil, fmt.Errorf("unsupported schema version %d for %q", m.SchemaVersion, ref.String()) + } + if len(m.FSLayers) != len(m.History) { + return nil, fmt.Errorf("length of history not equal to number of layers for %q", ref.String()) + } + if len(m.FSLayers) == 0 { + return nil, fmt.Errorf("no FSLayers in manifest for %q", ref.String()) + } + return m, nil +} + +func fixManifestLayers(m *schema1.Manifest) error { + imgs := make([]*image.V1Image, len(m.FSLayers)) + for i := range m.FSLayers { + img := &image.V1Image{} + + if err := json.Unmarshal([]byte(m.History[i].V1Compatibility), img); err != nil { + return err + } + + imgs[i] = img + if err := v1.ValidateID(img.ID); err != nil { + return err + } + } + + if imgs[len(imgs)-1].Parent != "" && runtime.GOOS != "windows" { + // Windows base layer can point to a base layer parent that is not in manifest. 
+ return errors.New("Invalid parent ID in the base layer of the image") + } + + // check general duplicates to error instead of a deadlock + idmap := make(map[string]struct{}) + + var lastID string + for _, img := range imgs { + // skip IDs that appear after each other, we handle those later + if _, exists := idmap[img.ID]; img.ID != lastID && exists { + return fmt.Errorf("ID %+v appears multiple times in manifest", img.ID) + } + lastID = img.ID + idmap[lastID] = struct{}{} + } + + // backwards loop so that we keep the remaining indexes after removing items + for i := len(imgs) - 2; i >= 0; i-- { + if imgs[i].ID == imgs[i+1].ID { // repeated ID. remove and continue + m.FSLayers = append(m.FSLayers[:i], m.FSLayers[i+1:]...) + m.History = append(m.History[:i], m.History[i+1:]...) + } else if imgs[i].Parent != imgs[i+1].ID { + return fmt.Errorf("Invalid parent ID. Expected %v, got %v", imgs[i+1].ID, imgs[i].Parent) + } + } + + return nil +} + +func (mf *v2ManifestFetcher) pullSchema2(ctx context.Context, ref reference.Named, mfst *schema2.DeserializedManifest) (img *image.Image, mfInfo manifestInfo, err error) { + mfInfo.digest, err = schema2ManifestDigest(ref, mfst) + if err != nil { + return nil, mfInfo, err + } + + target := mfst.Target() + + configChan := make(chan []byte, 1) + errChan := make(chan error, 1) + var cancel func() + ctx, cancel = context.WithCancel(ctx) + + // Pull the image config + go func() { + configJSON, err := mf.pullSchema2ImageConfig(ctx, target.Digest) + if err != nil { + errChan <- ImageConfigPullError{Err: err} + cancel() + return + } + configChan <- configJSON + }() + + var ( + configJSON []byte // raw serialized image config + unmarshalledConfig image.Image // deserialized image config + ) + if runtime.GOOS == "windows" { + configJSON, unmarshalledConfig, err = receiveConfig(configChan, errChan) + if err != nil { + return nil, mfInfo, err + } + if unmarshalledConfig.RootFS == nil { + return nil, mfInfo, errors.New("image config has no rootfs section") + } + } + + if configJSON == nil { + configJSON, unmarshalledConfig, err = receiveConfig(configChan, errChan) + if err != nil { + return nil, mfInfo, err + } + } + + // collect all references so that we can ask for cross-repo blob + // mount in the cases where a manifest list is created from images + // outside the target repo + for _, descriptor := range mfst.References() { + mfInfo.blobDigests = append(mfInfo.blobDigests, descriptor.Digest) + } + for _, descriptor := range mfst.Layers { + mfInfo.layers = append(mfInfo.layers, descriptor.Digest.String()) + } + + img, err = image.NewFromJSON(configJSON) + if err != nil { + return nil, mfInfo, err + } + // add the size of the manifest to the image response; needed for assembling proper + // manifest lists + _, mfBytes, err := mfst.Payload() + if err != nil { + return nil, mfInfo, err + } + mfInfo.length = int64(len(mfBytes)) + mfInfo.jsonBytes = mfBytes + return img, mfInfo, nil +} + +func (mf *v2ManifestFetcher) pullSchema2ImageConfig(ctx context.Context, dgst digest.Digest) (configJSON []byte, err error) { + blobs := mf.repo.Blobs(ctx) + configJSON, err = blobs.Get(ctx, dgst) + if err != nil { + return nil, err + } + + // Verify image config digest + verifier := dgst.Verifier() + if _, err := verifier.Write(configJSON); err != nil { + return nil, err + } + if !verifier.Verified() { + err := fmt.Errorf("image config verification failed for digest %s", dgst) + logrus.Error(err) + return nil, err + } + + return configJSON, nil +} + +func receiveConfig(configChan 
<-chan []byte, errChan <-chan error) ([]byte, image.Image, error) {
+	select {
+	case configJSON := <-configChan:
+		var unmarshalledConfig image.Image
+		if err := json.Unmarshal(configJSON, &unmarshalledConfig); err != nil {
+			return nil, image.Image{}, err
+		}
+		return configJSON, unmarshalledConfig, nil
+	case err := <-errChan:
+		return nil, image.Image{}, err
+		// Don't need a case for ctx.Done in the select because cancellation
+		// will trigger an error in mf.pullSchema2ImageConfig.
+	}
+}
+
+// ImageConfigPullError is an error pulling the image config blob
+// (only applies to schema2).
+type ImageConfigPullError struct {
+	Err error
+}
+
+// Error returns the error string for ImageConfigPullError.
+func (e ImageConfigPullError) Error() string {
+	return "error pulling image configuration: " + e.Err.Error()
+}
+
+// allowV1Fallback checks if the error is a possible reason to fallback to v1
+// (even if confirmedV2 has been set already), and if so, wraps the error in
+// a fallbackError with confirmedV2 set to false. Otherwise, it returns the
+// error unmodified.
+func allowV1Fallback(err error) error {
+	switch v := err.(type) {
+	case errcode.Errors:
+		if len(v) != 0 {
+			if v0, ok := v[0].(errcode.Error); ok && shouldV2Fallback(v0) {
+				return fallbackError{err: err, confirmedV2: false, transportOK: true}
+			}
+		}
+	case errcode.Error:
+		if shouldV2Fallback(v) {
+			return fallbackError{err: err, confirmedV2: false, transportOK: true}
+		}
+	}
+	return err
+}
+
+// schema2ManifestDigest computes the manifest digest, and, if pulling by
+// digest, ensures that it matches the requested digest.
+func schema2ManifestDigest(ref reference.Named, mfst distribution.Manifest) (digest.Digest, error) {
+	_, canonical, err := mfst.Payload()
+	if err != nil {
+		return "", err
+	}
+
+	// If pull by digest, then verify the manifest digest.
+	if digested, isDigested := ref.(reference.Canonical); isDigested {
+		verifier := digested.Digest().Verifier()
+		if _, err := verifier.Write(canonical); err != nil {
+			return "", err
+		}
+		if !verifier.Verified() {
+			err := fmt.Errorf("manifest verification failed for digest %s", digested.Digest())
+			logrus.Error(err)
+			return "", err
+		}
+		return digested.Digest(), nil
+	}
+
+	return digest.FromBytes(canonical), nil
+}
+
+// pullManifestList handles "manifest lists" which point to various
+// platform-specific manifests.
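+// Each listed manifest is fetched by digest; the first element of every
+// returned slice describes the manifest list itself rather than one of its
+// platform images (added commentary).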
+func (mf *v2ManifestFetcher) pullManifestList(ctx context.Context, ref reference.Named, mfstList *manifestlist.DeserializedManifestList) ([]*image.Image, []manifestInfo, []string, error) { + var ( + imageList = []*image.Image{} + mfInfos = []manifestInfo{} + mediaType = []string{} + ) + manifestListDigest, err := schema2ManifestDigest(ref, mfstList) + if err != nil { + return nil, nil, nil, err + } + logrus.Debugf("Pulling manifest list entries for ML digest %v", manifestListDigest) + + // for displaying basic information on the "outer" manifest list entry, we will + // create the first entry in the returned arrays to hold the manifest list details: + mfInfos = append(mfInfos, manifestInfo{digest: manifestListDigest}) + mediaType = append(mediaType, mfstList.MediaType) + imageList = append(imageList, &image.Image{}) + + for _, manifestDescriptor := range mfstList.Manifests { + manSvc, err := mf.repo.Manifests(ctx) + if err != nil { + return nil, nil, nil, err + } + + thisDigest := manifestDescriptor.Digest + thisPlatform := manifestDescriptor.Platform + manifest, err := manSvc.Get(ctx, thisDigest) + if err != nil { + return nil, nil, nil, err + } + + manifestRef, err := reference.WithDigest(ref, thisDigest) + if err != nil { + return nil, nil, nil, err + } + + switch v := manifest.(type) { + case *schema1.SignedManifest: + img, mfInfo, err := mf.pullSchema1(ctx, manifestRef, v) + imageList = append(imageList, img) + mfInfo.platform = thisPlatform + mfInfos = append(mfInfos, mfInfo) + mediaType = append(mediaType, schema1.MediaTypeManifest) + if err != nil { + return nil, nil, nil, err + } + case *schema2.DeserializedManifest: + img, mfInfo, err := mf.pullSchema2(ctx, manifestRef, v) + imageList = append(imageList, img) + mfInfo.platform = thisPlatform + mfInfos = append(mfInfos, mfInfo) + mediaType = append(mediaType, schema2.MediaTypeManifest) + if err != nil { + return nil, nil, nil, err + } + default: + return nil, nil, nil, errors.New("unsupported manifest format") + } + } + + return imageList, mfInfos, mediaType, err +} diff --git a/src/cmd/linuxkit/vendor/github.com/estesp/manifest-tool/docker/util.go b/src/cmd/linuxkit/vendor/github.com/estesp/manifest-tool/docker/util.go new file mode 100644 index 000000000..c5ccdb3e2 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/estesp/manifest-tool/docker/util.go @@ -0,0 +1,54 @@ +package docker + +import "fmt" + +// list of valid os/arch values (see "Optional Environment Variables" section +// of https://golang.org/doc/install/source +// Added linux/s390x as we know System z support already exists + +var validOSArch = map[string]bool{ + "darwin/386": true, + "darwin/amd64": true, + "darwin/arm": true, + "darwin/arm64": true, + "dragonfly/amd64": true, + "freebsd/386": true, + "freebsd/amd64": true, + "freebsd/arm": true, + "linux/386": true, + "linux/amd64": true, + "linux/arm": true, + "linux/arm/v5": true, + "linux/arm/v6": true, + "linux/arm/v7": true, + "linux/arm64": true, + "linux/arm64/v8": true, + "linux/ppc64": true, + "linux/ppc64le": true, + "linux/mips64": true, + "linux/mips64le": true, + "linux/s390x": true, + "netbsd/386": true, + "netbsd/amd64": true, + "netbsd/arm": true, + "openbsd/386": true, + "openbsd/amd64": true, + "openbsd/arm": true, + "plan9/386": true, + "plan9/amd64": true, + "solaris/amd64": true, + "windows/386": true, + "windows/amd64": true, + "windows/arm": true, +} + +func isValidOSArch(os string, arch string, variant string) bool { + osarch := fmt.Sprintf("%s/%s", os, arch) + + if variant != "" { + 
osarch = fmt.Sprintf("%s/%s/%s", os, arch, variant) + } + + _, ok := validOSArch[osarch] + return ok +} diff --git a/src/cmd/linuxkit/vendor/github.com/estesp/manifest-tool/types/types.go b/src/cmd/linuxkit/vendor/github.com/estesp/manifest-tool/types/types.go new file mode 100644 index 000000000..e6f0dae57 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/estesp/manifest-tool/types/types.go @@ -0,0 +1,51 @@ +package types + +import ( + "github.com/docker/distribution/manifest/manifestlist" + "github.com/docker/docker/api/types/container" +) + +// ImageInspect holds information about an image in a registry +type ImageInspect struct { + Size int64 + MediaType string + Tag string + Digest string + RepoTags []string + Comment string + Created string + ContainerConfig *container.Config + DockerVersion string + Author string + Config *container.Config + Architecture string + Os string + OSVersion string + OSFeatures []string + Layers []string + References []string + Platform manifestlist.PlatformSpec + CanonicalJSON []byte +} + +// YAMLInput represents the YAML format input to the pushml +// command. +type YAMLInput struct { + Image string + Tags []string + Manifests []ManifestEntry +} + +// ManifestEntry represents an entry in the list of manifests to +// be combined into a manifest list, provided via the YAML input +type ManifestEntry struct { + Image string + Platform manifestlist.PlatformSpec +} + +// AuthInfo holds information about how manifest-tool should connect and authenticate to the docker registry +type AuthInfo struct { + Username string + Password string + DockerCfg string +} diff --git a/src/cmd/linuxkit/vendor/github.com/estesp/manifest-tool/vendor.conf b/src/cmd/linuxkit/vendor/github.com/estesp/manifest-tool/vendor.conf new file mode 100755 index 000000000..95b659a85 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/estesp/manifest-tool/vendor.conf @@ -0,0 +1,49 @@ +github.com/codegangsta/cli v1.2.0 +github.com/gorilla/mux master +github.com/go-yaml/yaml v2 +github.com/docker/cli 18.09 +github.com/docker/docker 71e07f91307a9cb51071c6510768139c1f436750 https://github.com/docker/engine # 19.03 head +github.com/docker/docker-credential-helpers 5241b46610f2491efdf9d1c85f1ddf5b02f6d962 + +golang.org/x/crypto 88737f569e3a9c7ab309cdc09a07fe7fc87233c3 +golang.org/x/time fbb02b2291d28baffd63558aa44b4b56f178d650 +github.com/sirupsen/logrus 8bdbc7bcc01dcbb8ec23dc8a28e332258d25251f # v1.4.1 +golang.org/x/net f3200d17e092c607f615320ecaad13d87ad9a2b3 +golang.org/x/sys 4c4f7f33c9ed00de01c4c741d2177abfcfe19307 + +github.com/docker/distribution 0d3efadf0154c2b8a4e7b6621fff9809655cc580 +github.com/vbatts/tar-split 620714a4c508c880ac1bdda9c8370a2b19af1a55 # v0.11.0 +github.com/opencontainers/go-digest 279bed98673dd5bef374d3b6e4b09e2af76183bf # v1.0.0-rc1 + +github.com/docker/libtrust 9cbd2a1374f46905c68a4eb3694a130610adc62a +github.com/docker/go-units 519db1ee28dcc9fd2474ae59fca29a810482bfb1 # v0.4.0 +github.com/docker/go-connections 7395e3f8aa162843a74ed6d48e79627d9792ac55 # v0.4.0 +github.com/docker/go-metrics d466d4f6fd960e01820085bd7e1a24426ee7ef18 + +github.com/opencontainers/runc 3e425f80a8c931f88e6d94a8c831b9d5aa481657 # v1.0.0-rc8-92-g84373aaa +github.com/opencontainers/runtime-spec 29686dbc5559d93fb1ef402eeda3e35c38d75af4 # v1.0.1-59-g29686db +github.com/opencontainers/image-spec d60099175f88c47cd379c4738d158884749ed235 # v1.0.1 + +# prometheus +github.com/prometheus/client_golang c5b7fccd204277076155f10851dad72b76a49317 # v0.8.0 +github.com/beorn7/perks 
e7f67b54abbeac9c40a31de0f81159e4cafebd6a +github.com/prometheus/client_model 6f3806018612930941127f2a7c6c453ba2c527d2 +github.com/prometheus/common 7600349dcfe1abd18d72d3a1770870d9800a7801 +github.com/prometheus/procfs 7d6f385de8bea29190f15ba9931442a0eaef9af7 +github.com/matttproud/golang_protobuf_extensions c12348ce28de40eed0136aa2b644d0ee0650e56c # v1.0.1 +github.com/pkg/errors ba968bfe8b2f7e042a574c888954fccecfa385b4 # v0.8.1 + +github.com/Azure/go-ansiterm d6e3b3328b783f23731bc4d058875b0371ff8109 +github.com/Microsoft/hcsshim 672e52e9209d1e53718c1b6a7d68cc9272654ab5 +github.com/Microsoft/go-winio 6c72808b55902eae4c5943626030429ff20f3b63 # v0.4.14 +github.com/konsorten/go-windows-terminal-sequences f55edac94c9bbba5d6182a4be46d86a2c9b5b50e # v1.0.2 + +google.golang.org/grpc v1.12.0 +google.golang.org/genproto d80a6e20e776b0b17a324d0ba1ab50a39c8e8944 +github.com/gogo/protobuf v1.0.0 +github.com/golang/protobuf v1.1.0 +github.com/mattn/go-shellwords master +github.com/containerd/containerd 9754871865f7fe2f4e74d43e2fc7ccd237edcbce # v1.2.2 +github.com/containerd/continuity 004b46473808b3e7a4a3049c20e4376c91eb966d +golang.org/x/sync e225da77a7e68af35c70ccbf71af2b83e6acac3c +github.com/morikuni/aec 39771216ff4c63d11f5e604076f9c45e8be1067b diff --git a/src/cmd/linuxkit/vendor/github.com/godbus/dbus/v5/.travis.yml b/src/cmd/linuxkit/vendor/github.com/godbus/dbus/v5/.travis.yml new file mode 100644 index 000000000..dd6767204 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/godbus/dbus/v5/.travis.yml @@ -0,0 +1,50 @@ +dist: bionic +language: go +go_import_path: github.com/godbus/dbus + +go: + - 1.11.x + - 1.12.x + - 1.13.x + - tip + +matrix: + fast_finish: true + allow_failures: + - go: tip + +addons: + apt: + packages: + - dbus + - dbus-x11 + +before_install: + - export GO111MODULE=on + +script: + - go test -v -race -mod=readonly ./... # Run all the tests with the race detector enabled + - go vet ./... # go vet is the official Go static analyzer + +jobs: + include: + # The build matrix doesn't cover build stages, so manually expand + # the jobs with anchors + - &multiarch + stage: "Multiarch Test" + go: 1.11.x + env: TARGETS="386 arm arm64 ppc64le" + before_install: + - docker run --rm --privileged multiarch/qemu-user-static --reset -p yes + script: + - | + set -e + for target in $TARGETS; do + printf "\e[1mRunning test suite under ${target}.\e[0m\n" + GOARCH="$target" go test -v ./... + printf "\n\n" + done + - <<: *multiarch + go: 1.12.x + - <<: *multiarch + go: 1.13.x diff --git a/src/cmd/linuxkit/vendor/github.com/godbus/dbus/v5/CONTRIBUTING.md b/src/cmd/linuxkit/vendor/github.com/godbus/dbus/v5/CONTRIBUTING.md new file mode 100644 index 000000000..c88f9b2bd --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/godbus/dbus/v5/CONTRIBUTING.md @@ -0,0 +1,50 @@ +# How to Contribute + +## Getting Started + +- Fork the repository on GitHub +- Read the [README](README.markdown) for build and test instructions +- Play with the project, submit bugs, submit patches! + +## Contribution Flow + +This is a rough outline of what a contributor's workflow looks like: + +- Create a topic branch from where you want to base your work (usually master). +- Make commits of logical units. +- Make sure your commit messages are in the proper format (see below). +- Push your changes to a topic branch in your fork of the repository. +- Make sure the tests pass, and add any new tests as appropriate. +- Submit a pull request to the original repository. + +Thanks for your contributions! 
+
+### Format of the Commit Message
+
+We follow a rough convention for commit messages that is designed to answer two
+questions: what changed and why. The subject line should feature the what and
+the body of the commit should describe the why.
+
+```
+scripts: add the test-cluster command
+
+this uses tmux to setup a test cluster that you can easily kill and
+start for debugging.
+
+Fixes #38
+```
+
+The format can be described more formally as follows:
+
+```
+<subsystem>: <what changed>
+<BLANK LINE>
+<why this change was made>
+<BLANK LINE>