Mirror of https://github.com/containers/skopeo.git, synced 2026-02-02 07:19:35 +00:00

Compare commits (51 commits)
| SHA1 |
|---|
| 41991bab70 |
| 2b5086167f |
| b46d16f48c |
| 9fef0eb3f3 |
| 30b0a1741e |
| 945b9dc08f |
| 904b064da4 |
| 7ae62af073 |
| 7525a79c93 |
| 07287b5783 |
| 0a2a62ac20 |
| 5581c62a3a |
| 6b5bdb7563 |
| 2bdffc89c2 |
| 65e6449c95 |
| 2829f7da9e |
| ece44c2842 |
| 0fa335c149 |
| 5c0ad57c2c |
| b2934e7cf6 |
| 2af7114ea1 |
| 0e1cc9203e |
| e255ccc145 |
| 9447a55b61 |
| 9fdceeb2b2 |
| 18ee5f8119 |
| ab6a17059c |
| 81c5e94850 |
| 99dc83062a |
| 4d8ea6729f |
| ac85091ecd |
| ffa640c2b0 |
| c73bcba7e6 |
| 329e1cf61c |
| 854f766dc7 |
| 5c73fdbfdc |
| 097549748a |
| 032309941b |
| d93a581fb8 |
| 52075ab386 |
| d65ae4b1d7 |
| c32d27f59e |
| 883d65a54a |
| 94728fb73f |
| 520f0e5ddb |
| fa39b49a5c |
| 0490018903 |
| b434c8f424 |
| 79de2d9f09 |
| 2031e17b3c |
| 5a050c1383 |
@@ -1,4 +1,4 @@
-FROM ubuntu:17.10
+FROM ubuntu:18.10
 
 RUN apt-get update && apt-get install -y \
 	golang \
Makefile (27 changed lines)
@@ -30,7 +30,7 @@ ifeq ($(DEBUG), 1)
 override GOGCFLAGS += -N -l
 endif
 
-ifeq ($(shell go env GOOS), linux)
+ifeq ($(shell $(GO) env GOOS), linux)
 GO_DYN_FLAGS="-buildmode=pie"
 endif
 
@@ -50,6 +50,7 @@ CONTAINER_RUN := $(CONTAINER_CMD) "$(IMAGE)"
 GIT_COMMIT := $(shell git rev-parse HEAD 2> /dev/null || true)
 
 MANPAGES_MD = $(wildcard docs/*.md)
+MANPAGES ?= $(MANPAGES_MD:%.md=%)
 
 BTRFS_BUILD_TAG = $(shell hack/btrfs_tag.sh) $(shell hack/btrfs_installed_tag.sh)
 LIBDM_BUILD_TAG = $(shell hack/libdm_tag.sh)
@@ -67,6 +68,19 @@ endif
 # use source debugging tools like delve.
 all: binary docs-in-container
 
+help:
+	@echo "Usage: make <target>"
+	@echo
+	@echo " * 'install' - Install binaries and documents to system locations"
+	@echo " * 'binary' - Build skopeo with a container"
+	@echo " * 'binary-local' - Build skopeo locally"
+	@echo " * 'test-unit' - Execute unit tests"
+	@echo " * 'test-integration' - Execute integration tests"
+	@echo " * 'validate' - Verify whether there is no conflict and all Go source files have been formatted, linted and vetted"
+	@echo " * 'check' - Including above validate, test-integration and test-unit"
+	@echo " * 'shell' - Run the built image and attach to a shell"
+	@echo " * 'clean' - Clean artifacts"
+
 # Build a container image (skopeobuild) that has everything we need to build.
 # Then do the build and the output (skopeo) should appear in current dir
 binary: cmd/skopeo
@@ -89,10 +103,10 @@ binary-local-static:
 build-container:
 	${CONTAINER_RUNTIME} build ${BUILD_ARGS} -t "$(IMAGE)" .
 
-docs/%.1: docs/%.1.md
+$(MANPAGES): %: %.md
 	@sed -e 's/\((skopeo.*\.md)\)//' -e 's/\[\(skopeo.*\)\]/\1/' $< | $(GOMD2MAN) -in /dev/stdin -out $@
 
-docs: $(MANPAGES_MD:%.md=%)
+docs: $(MANPAGES)
 
 docs-in-container:
 	${CONTAINER_RUNTIME} build ${BUILD_ARGS} -f Dockerfile.build -t skopeobuildimage .
@@ -113,9 +127,9 @@ install-binary: ./skopeo
 	install -d -m 755 ${INSTALLDIR}
 	install -m 755 skopeo ${INSTALLDIR}/skopeo
 
-install-docs: docs/skopeo.1
+install-docs: docs
 	install -d -m 755 ${MANINSTALLDIR}/man1
-	install -m 644 docs/skopeo.1 ${MANINSTALLDIR}/man1/skopeo.1
+	install -m 644 docs/*.1 ${MANINSTALLDIR}/man1/
 
 install-completions:
 	install -m 755 -d ${BASHINSTALLDIR}
@@ -151,4 +165,5 @@ test-unit-local:
 
 vendor: vendor.conf .install.vndr
 	$(GOPATH)/bin/vndr \
-		-whitelist '^github.com/containers/image/docs/.*'
+		-whitelist '^github.com/containers/image/docs/.*' \
+		-whitelist '^github.com/containers/image/registries.conf'
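The `$(MANPAGES)` pattern rule above pipes each markdown page through `sed` to strip cross-reference link syntax before `go-md2man` converts it. A minimal Go sketch of the same two substitutions, using Go's `regexp` in place of sed's BRE syntax (the sample input line is hypothetical):

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Mirrors sed -e 's/\((skopeo.*\.md)\)//' -e 's/\[\(skopeo.*\)\]/\1/':
	// drop the "(skopeo-*.md)" link target, then unwrap the "[...]" label.
	line := "See [skopeo-copy(1)](skopeo-copy.1.md) for details." // hypothetical input
	noTarget := regexp.MustCompile(`\(skopeo.*\.md\)`).ReplaceAllString(line, "")
	plain := regexp.MustCompile(`\[(skopeo[^\]]*)\]`).ReplaceAllString(noTarget, "$1")
	fmt.Println(plain) // See skopeo-copy(1) for details.
}
```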
@@ -85,6 +85,11 @@ func (opts *copyOptions) run(args []string, stdout io.Writer) error {
 	if len(args) != 2 {
 		return errorShouldDisplayUsage{errors.New("Exactly two arguments expected")}
 	}
+	imageNames := args
+
+	if err := reexecIfNecessaryForImages(imageNames...); err != nil {
+		return err
+	}
 
 	policyContext, err := opts.global.getPolicyContext()
 	if err != nil {
@@ -92,13 +97,13 @@ func (opts *copyOptions) run(args []string, stdout io.Writer) error {
 	}
 	defer policyContext.Destroy()
 
-	srcRef, err := alltransports.ParseImageName(args[0])
+	srcRef, err := alltransports.ParseImageName(imageNames[0])
 	if err != nil {
-		return fmt.Errorf("Invalid source name %s: %v", args[0], err)
+		return fmt.Errorf("Invalid source name %s: %v", imageNames[0], err)
 	}
-	destRef, err := alltransports.ParseImageName(args[1])
+	destRef, err := alltransports.ParseImageName(imageNames[1])
 	if err != nil {
-		return fmt.Errorf("Invalid destination name %s: %v", args[1], err)
+		return fmt.Errorf("Invalid destination name %s: %v", imageNames[1], err)
 	}
 
 	sourceCtx, err := opts.srcImage.newSystemContext()
@@ -44,10 +44,15 @@ func (opts *deleteOptions) run(args []string, stdout io.Writer) error {
 	if len(args) != 1 {
 		return errors.New("Usage: delete imageReference")
 	}
+	imageName := args[0]
 
-	ref, err := alltransports.ParseImageName(args[0])
+	if err := reexecIfNecessaryForImages(imageName); err != nil {
+		return err
+	}
+
+	ref, err := alltransports.ParseImageName(imageName)
 	if err != nil {
-		return fmt.Errorf("Invalid source name %s: %v", args[0], err)
+		return fmt.Errorf("Invalid source name %s: %v", imageName, err)
 	}
 
 	sys, err := opts.image.newSystemContext()
@@ -34,6 +34,7 @@ type inspectOptions struct {
 	global *globalOptions
 	image  *imageOptions
 	raw    bool // Output the raw manifest instead of parsing information about the image
+	config bool // Output the raw config blob instead of parsing information about the image
 }
 
 func inspectCmd(global *globalOptions) cli.Command {
@@ -58,9 +59,14 @@ func inspectCmd(global *globalOptions) cli.Command {
 		Flags: append(append([]cli.Flag{
 			cli.BoolFlag{
 				Name:        "raw",
-				Usage:       "output raw manifest",
+				Usage:       "output raw manifest or configuration",
 				Destination: &opts.raw,
 			},
+			cli.BoolFlag{
+				Name:        "config",
+				Usage:       "output configuration",
+				Destination: &opts.config,
+			},
 		}, sharedFlags...), imageFlags...),
 		Action: commandAction(opts.run),
 	}
@@ -73,7 +79,13 @@ func (opts *inspectOptions) run(args []string, stdout io.Writer) (retErr error)
 	if len(args) != 1 {
 		return errors.New("Exactly one argument expected")
 	}
-	img, err := parseImage(ctx, opts.image, args[0])
+	imageName := args[0]
+
+	if err := reexecIfNecessaryForImages(imageName); err != nil {
+		return err
+	}
+
+	img, err := parseImage(ctx, opts.image, imageName)
 	if err != nil {
 		return err
 	}
@@ -88,12 +100,32 @@ func (opts *inspectOptions) run(args []string, stdout io.Writer) (retErr error)
 	if err != nil {
 		return err
 	}
-	if opts.raw {
+	if opts.config && opts.raw {
+		configBlob, err := img.ConfigBlob(ctx)
+		if err != nil {
+			return fmt.Errorf("Error reading configuration blob: %v", err)
+		}
+		_, err = stdout.Write(configBlob)
+		if err != nil {
+			return fmt.Errorf("Error writing configuration blob to standard output: %v", err)
+		}
+		return nil
+	} else if opts.raw {
 		_, err := stdout.Write(rawManifest)
 		if err != nil {
 			return fmt.Errorf("Error writing manifest to standard output: %v", err)
 		}
 		return nil
+	} else if opts.config {
+		config, err := img.OCIConfig(ctx)
+		if err != nil {
+			return fmt.Errorf("Error reading OCI-formatted configuration data: %v", err)
+		}
+		err = json.NewEncoder(stdout).Encode(config)
+		if err != nil {
+			return fmt.Errorf("Error writing OCI-formatted configuration data to standard output: %v", err)
+		}
+		return nil
 	}
 	imgInspect, err := img.Inspect(ctx)
 	if err != nil {
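The rewritten `run` above is a three-way branch on the `--raw` and `--config` flags. A standalone sketch of the same output matrix, with placeholder blobs standing in for real manifest and config data:

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// emit reproduces the branch order: --config --raw wins, then --raw,
// then --config, then the default human-oriented output.
func emit(raw, config bool, rawManifest, rawConfig []byte, ociConfig interface{}) error {
	switch {
	case config && raw:
		_, err := os.Stdout.Write(rawConfig) // raw config blob, unparsed
		return err
	case raw:
		_, err := os.Stdout.Write(rawManifest) // raw manifest bytes
		return err
	case config:
		return json.NewEncoder(os.Stdout).Encode(ociConfig) // OCI-format config
	default:
		fmt.Println("parsed inspect output")
		return nil
	}
}

func main() {
	manifest := []byte("{\"kind\":\"manifest\"}\n") // placeholder data
	config := []byte("{\"kind\":\"config\"}\n")
	_ = emit(false, true, manifest, config, map[string]string{"architecture": "amd64"})
}
```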
@@ -43,6 +43,11 @@ func (opts *layersOptions) run(args []string, stdout io.Writer) (retErr error) {
 	if len(args) == 0 {
 		return errors.New("Usage: layers imageReference [layer...]")
 	}
+	imageName := args[0]
+
+	if err := reexecIfNecessaryForImages(imageName); err != nil {
+		return err
+	}
 
 	ctx, cancel := opts.global.commandTimeoutContext()
 	defer cancel()
@@ -52,7 +57,7 @@ func (opts *layersOptions) run(args []string, stdout io.Writer) (retErr error) {
 		return err
 	}
 	cache := blobinfocache.DefaultCache(sys)
-	rawSource, err := parseImageSource(ctx, opts.image, args[0])
+	rawSource, err := parseImageSource(ctx, opts.image, imageName)
 	if err != nil {
 		return err
 	}
@@ -18,14 +18,15 @@ import (
 var gitCommit = ""
 
 type globalOptions struct {
-	debug             bool          // Enable debug output
-	tlsVerify         optionalBool  // Require HTTPS and verify certificates (for docker: and docker-daemon:)
-	policyPath        string        // Path to a signature verification policy file
-	insecurePolicy    bool          // Use an "allow everything" signature verification policy
-	registriesDirPath string        // Path to a "registries.d" registry configuration directory
-	overrideArch      string        // Architecture to use for choosing images, instead of the runtime one
-	overrideOS        string        // OS to use for choosing images, instead of the runtime one
-	commandTimeout    time.Duration // Timeout for the command execution
+	debug              bool          // Enable debug output
+	tlsVerify          optionalBool  // Require HTTPS and verify certificates (for docker: and docker-daemon:)
+	policyPath         string        // Path to a signature verification policy file
+	insecurePolicy     bool          // Use an "allow everything" signature verification policy
+	registriesDirPath  string        // Path to a "registries.d" registry configuration directory
+	overrideArch       string        // Architecture to use for choosing images, instead of the runtime one
+	overrideOS         string        // OS to use for choosing images, instead of the runtime one
+	commandTimeout     time.Duration // Timeout for the command execution
+	registriesConfPath string        // Path to the "registries.conf" file
 }
 
 // createApp returns a cli.App, and the underlying globalOptions object, to be run or tested.
@@ -83,6 +84,12 @@ func createApp() (*cli.App, *globalOptions) {
 			Usage:       "timeout for the command execution",
 			Destination: &opts.commandTimeout,
 		},
+		cli.StringFlag{
+			Name:        "registries-conf",
+			Usage:       "path to the registries.conf file",
+			Destination: &opts.registriesConfPath,
+			Hidden:      true,
+		},
 	}
 	app.Before = opts.before
 	app.Commands = []cli.Command{
@@ -99,7 +106,7 @@ func createApp() (*cli.App, *globalOptions) {
 }
 
 // before is run by the cli package for any command, before running the command-specific handler.
-func (opts *globalOptions) before(_ *cli.Context) error {
+func (opts *globalOptions) before(ctx *cli.Context) error {
 	if opts.debug {
 		logrus.SetLevel(logrus.DebugLevel)
 	}
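The new `--registries-conf` option is registered with `Hidden: true`, so it is honored when passed but left out of `--help`. A minimal standalone urfave/cli v1 program showing the same pattern (the app name and output are illustrative):

```go
package main

import (
	"fmt"
	"os"

	"github.com/urfave/cli"
)

func main() {
	var registriesConfPath string
	app := cli.NewApp()
	app.Name = "demo"
	app.Flags = []cli.Flag{
		cli.StringFlag{
			Name:        "registries-conf",
			Usage:       "path to the registries.conf file",
			Destination: &registriesConfPath,
			Hidden:      true, // accepted, but not shown in "demo --help"
		},
	}
	app.Action = func(_ *cli.Context) error {
		fmt.Println("registries.conf:", registriesConfPath)
		return nil
	}
	if err := app.Run(os.Args); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```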
cmd/skopeo/unshare.go (new file, 11 lines)
@@ -0,0 +1,11 @@
+// +build !linux
+
+package main
+
+func maybeReexec() error {
+	return nil
+}
+
+func reexecIfNecessaryForImages(inputImageNames ...string) error {
+	return nil
+}
cmd/skopeo/unshare_linux.go (new file, 46 lines)
@@ -0,0 +1,46 @@
+package main
+
+import (
+	"github.com/containers/buildah/pkg/unshare"
+	"github.com/containers/image/storage"
+	"github.com/containers/image/transports/alltransports"
+	"github.com/pkg/errors"
+	"github.com/syndtr/gocapability/capability"
+)
+
+var neededCapabilities = []capability.Cap{
+	capability.CAP_CHOWN,
+	capability.CAP_DAC_OVERRIDE,
+	capability.CAP_FOWNER,
+	capability.CAP_FSETID,
+	capability.CAP_MKNOD,
+	capability.CAP_SETFCAP,
+}
+
+func maybeReexec() error {
+	// With Skopeo we need only the subset of the root capabilities necessary
+	// for pulling an image to the storage. Do not attempt to create a namespace
+	// if we already have the capabilities we need.
+	capabilities, err := capability.NewPid(0)
+	if err != nil {
+		return errors.Wrapf(err, "error reading the current capabilities sets")
+	}
+	for _, cap := range neededCapabilities {
+		if !capabilities.Get(capability.EFFECTIVE, cap) {
+			// We miss a capability we need, create a user namespaces
+			unshare.MaybeReexecUsingUserNamespace(true)
+			return nil
+		}
+	}
+	return nil
+}
+
+func reexecIfNecessaryForImages(imageNames ...string) error {
+	// Check if container-storage are used before doing unshare
+	for _, imageName := range imageNames {
+		if alltransports.TransportFromImageName(imageName).Name() == storage.Transport.Name() {
+			return maybeReexec()
+		}
+	}
+	return nil
+}
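`reexecIfNecessaryForImages` only re-execs inside a user namespace when some argument uses the `containers-storage:` transport. A self-contained sketch of that gating, with a simplified stand-in for `alltransports.TransportFromImageName` (the real function resolves a registered transport object rather than splitting strings):

```go
package main

import (
	"fmt"
	"strings"
)

// transportOf is a simplified stand-in for alltransports.TransportFromImageName:
// the transport name is everything before the first colon. The real function
// returns a registered types.ImageTransport (or nil), not a string.
func transportOf(imageName string) string {
	if i := strings.IndexByte(imageName, ':'); i != -1 {
		return imageName[:i]
	}
	return ""
}

func main() {
	for _, name := range []string{
		"containers-storage:myimage:latest",
		"docker://docker.io/library/busybox",
		"dir:/tmp/img",
	} {
		needsReexec := transportOf(name) == "containers-storage"
		fmt.Printf("%-40s reexec: %v\n", name, needsReexec)
	}
}
```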
@@ -59,6 +59,7 @@ type imageOptions struct {
 	tlsVerify        optionalBool // Require HTTPS and verify certificates (for docker: and docker-daemon:)
 	sharedBlobDir    string       // A directory to use for OCI blobs, shared across repositories
 	dockerDaemonHost string       // docker-daemon: host to connect to
+	noCreds          bool         // Access the registry anonymously
 }
 
 // imageFlags prepares a collection of CLI flags writing into imageOptions, and the managed imageOptions structure.
@@ -101,6 +102,11 @@ func imageFlags(global *globalOptions, shared *sharedImageOptions, flagPrefix, c
 			Usage:       "use docker daemon host at `HOST` (docker-daemon: only)",
 			Destination: &opts.dockerDaemonHost,
 		},
+		cli.BoolFlag{
+			Name:        flagPrefix + "no-creds",
+			Usage:       "Access the registry anonymously",
+			Destination: &opts.noCreds,
+		},
 	}, &opts
 }
 
@@ -108,14 +114,15 @@ func imageFlags(global *globalOptions, shared *sharedImageOptions, flagPrefix, c
 // It is guaranteed to return a fresh instance, so it is safe to make additional updates to it.
 func (opts *imageOptions) newSystemContext() (*types.SystemContext, error) {
 	ctx := &types.SystemContext{
-		RegistriesDirPath:    opts.global.registriesDirPath,
-		ArchitectureChoice:   opts.global.overrideArch,
-		OSChoice:             opts.global.overrideOS,
-		DockerCertPath:       opts.dockerCertPath,
-		OCISharedBlobDirPath: opts.sharedBlobDir,
-		AuthFilePath:         opts.shared.authFilePath,
-		DockerDaemonHost:     opts.dockerDaemonHost,
-		DockerDaemonCertPath: opts.dockerCertPath,
+		RegistriesDirPath:        opts.global.registriesDirPath,
+		ArchitectureChoice:       opts.global.overrideArch,
+		OSChoice:                 opts.global.overrideOS,
+		DockerCertPath:           opts.dockerCertPath,
+		OCISharedBlobDirPath:     opts.sharedBlobDir,
+		AuthFilePath:             opts.shared.authFilePath,
+		DockerDaemonHost:         opts.dockerDaemonHost,
+		DockerDaemonCertPath:     opts.dockerCertPath,
+		SystemRegistriesConfPath: opts.global.registriesConfPath,
 	}
 	if opts.tlsVerify.present {
 		ctx.DockerDaemonInsecureSkipTLSVerify = !opts.tlsVerify.value
@@ -127,6 +134,9 @@ func (opts *imageOptions) newSystemContext() (*types.SystemContext, error) {
 	if opts.tlsVerify.present {
 		ctx.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!opts.tlsVerify.value)
 	}
+	if opts.credsOption.present && opts.noCreds {
+		return nil, errors.New("creds and no-creds cannot be specified at the same time")
+	}
 	if opts.credsOption.present {
 		var err error
 		ctx.DockerAuthConfig, err = getDockerAuth(opts.credsOption.value)
@@ -134,6 +144,9 @@ func (opts *imageOptions) newSystemContext() (*types.SystemContext, error) {
 			return nil, err
 		}
 	}
+	if opts.noCreds {
+		ctx.DockerAuthConfig = &types.DockerAuthConfig{}
+	}
 	return ctx, nil
 }
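The `newSystemContext` change makes `--creds` and `--no-creds` mutually exclusive, and maps `--no-creds` to an empty auth config. A plain-Go sketch of that precedence, with a hypothetical type standing in for containers/image's `types.DockerAuthConfig`:

```go
package main

import (
	"errors"
	"fmt"
)

type dockerAuthConfig struct{ Username, Password string } // stand-in type

// resolveAuth mirrors the new precedence: the two flags conflict, --creds
// supplies credentials, and --no-creds forces an empty (anonymous) config.
func resolveAuth(creds string, credsSet, noCreds bool) (*dockerAuthConfig, error) {
	if credsSet && noCreds {
		return nil, errors.New("creds and no-creds cannot be specified at the same time")
	}
	if credsSet {
		return &dockerAuthConfig{Username: creds}, nil // real code splits user[:password]
	}
	if noCreds {
		return &dockerAuthConfig{}, nil // empty config == anonymous access
	}
	return nil, nil // leave unset; default credential lookup applies
}

func main() {
	cfg, err := resolveAuth("", false, true)
	fmt.Printf("%+v %v\n", cfg, err)
	_, err = resolveAuth("alice:secret", true, true)
	fmt.Println(err) // creds and no-creds cannot be specified at the same time
}
```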
@@ -17,9 +17,9 @@ _complete_() {
 	done
 
 	case "$cur" in
-	-*)
-		COMPREPLY=( $( compgen -W "$boolean_options $options_with_args" -- "$cur" ) )
-		;;
+		-*)
+			COMPREPLY=( $( compgen -W "$boolean_options $options_with_args" -- "$cur" ) )
+			;;
 		*)
 			if [ -n "$transports" ]
 			then
@@ -55,6 +55,8 @@ _skopeo_copy() {
 	local boolean_options="
 		--dest-compress
 		--remove-signatures
+		--src-no-creds
+		--dest-no-creds
 	"
 
 	local transports="
@@ -71,8 +73,10 @@ _skopeo_inspect() {
 		--cert-dir
 	"
 	local boolean_options="
+		--config
 		--raw
 		--tls-verify
+		--no-creds
 	"
 
 	local transports="
@@ -84,7 +88,7 @@ _skopeo_inspect() {
 
 _skopeo_standalone_sign() {
 	local options_with_args="
-	-o --output
+		-o --output
 	"
 	local boolean_options="
 	"
@@ -115,6 +119,7 @@ _skopeo_delete() {
 	"
 	local boolean_options="
 		--tls-verify
+		--no-creds
 	"
 
 	local transports="
@@ -126,44 +131,44 @@ _skopeo_delete() {
 
 _skopeo_layers() {
 	local options_with_args="
-	--creds
-	--cert-dir
+		--creds
+		--cert-dir
 	"
 	local boolean_options="
-	--tls-verify
+		--tls-verify
 	"
 	_complete_ "$options_with_args" "$boolean_options"
 }
 
 _skopeo_skopeo() {
 	local options_with_args="
-	--policy
-	--registries.d
+		--policy
+		--registries.d
 		--override-arch
 		--override-os
 		--command-timeout
 	"
 	local boolean_options="
-	--insecure-policy
-	--debug
-	--version -v
-	--help -h
+		--insecure-policy
+		--debug
+		--version -v
+		--help -h
 	"
 	commands=$( ${COMP_WORDS[@]:0:$COMP_CWORD} --generate-bash-completion )
 
 	case "$prev" in
-	$main_options_with_args_glob )
-		return
-		;;
+		$main_options_with_args_glob )
+			return
+			;;
 	esac
 
 	case "$cur" in
-	-*)
-		COMPREPLY=( $( compgen -W "$boolean_options $options_with_args" -- "$cur" ) )
-		;;
-	*)
-		COMPREPLY=( $( compgen -W "${commands[*]} help" -- "$cur" ) )
-		;;
+		-*)
+			COMPREPLY=( $( compgen -W "$boolean_options $options_with_args" -- "$cur" ) )
+			;;
+		*)
+			COMPREPLY=( $( compgen -W "${commands[*]} help" -- "$cur" ) )
+			;;
 	esac
 }
 
@@ -181,15 +186,17 @@ _cli_bash_autocomplete() {
-	local counter=1
+	counter=1
 	while [ $counter -lt $cword ]; do
-		case "!${words[$counter]}" in
-		*)
-			command=$(echo "${words[$counter]}" | sed 's/-/_/g')
-			cpos=$counter
-			(( cpos++ ))
-			break
-			;;
-		esac
-		(( counter++ ))
+		case "${words[$counter]}" in
+			-*)
+				;;
+			*)
+				command=$(echo "${words[$counter]}" | sed 's/-/_/g')
+				cpos=$counter
+				(( cpos++ ))
+				break
+				;;
+		esac
+		(( counter++ ))
 	done
 
 	local completions_func=_skopeo_${command}
@@ -38,10 +38,14 @@ If the authorization state is not found there, $HOME/.docker/config.json is chec
 
 **--src-cert-dir** _path_ Use certificates at _path_ (*.crt, *.cert, *.key) to connect to the source registry or daemon
 
+**--src-no-creds** _bool-value_ Access the registry anonymously.
+
 **--src-tls-verify** _bool-value_ Require HTTPS and verify certificates when talking to container source registry or daemon (defaults to true)
 
 **--dest-cert-dir** _path_ Use certificates at _path_ (*.crt, *.cert, *.key) to connect to the destination registry or daemon
 
+**--dest-no-creds** _bool-value_ Access the registry anonymously.
+
 **--dest-ostree-tmp-dir** _path_ Directory to use for OSTree temporary files.
 
 **--dest-tls-verify** _bool-value_ Require HTTPS and verify certificates when talking to container destination registry or daemon (defaults to true)
@@ -1,7 +1,7 @@
 % skopeo-delete(1)
 
 ## NAME
-skopeo\-delete - Mark _image-name_ for deletion. 
+skopeo\-delete - Mark _image-name_ for deletion.
 
 ## SYNOPSIS
 **skopeo delete** _image-name_
@@ -30,6 +30,8 @@ $ docker exec -it registry /usr/bin/registry garbage-collect /etc/docker-distrib
 
 **--tls-verify** _bool-value_ Require HTTPS and verify certificates when talking to container registries (defaults to true)
 
+**--no-creds** _bool-value_ Access the registry anonymously.
+
 Additionally, the registry must allow deletions by setting `REGISTRY_STORAGE_DELETE_ENABLED=true` for the registry daemon.
 
 ## EXAMPLES
@@ -4,7 +4,7 @@
 skopeo\-inspect - Return low-level information about _image-name_ in a registry
 
 ## SYNOPSIS
-**skopeo inspect** [**--raw**] _image-name_
+**skopeo inspect** [**--raw**] [**--config**] _image-name_
 
 Return low-level information about _image-name_ in a registry
 
@@ -12,17 +12,27 @@ Return low-level information about _image-name_ in a registry
 
 _image-name_ name of image to retrieve information about
 
+**--config** output configuration in OCI format, default is to format in JSON
+
+_image-name_ name of image to retrieve configuration for
+
+**--config** **--raw** output configuration in raw format
+
+_image-name_ name of image to retrieve configuration for
+
 **--authfile** _path_
 
-Path of the authentication file. Default is ${XDG_RUNTIME\_DIR}/containers/auth.json, which is set using `podman login`.
+Path of the authentication file. Default is ${XDG\_RUNTIME\_DIR}/containers/auth.json, which is set using `podman login`.
 If the authorization state is not found there, $HOME/.docker/config.json is checked, which is set using `docker login`.
 
 **--creds** _username[:password]_ for accessing the registry
 
-**--cert-dir** _path_ Use certificates at _path_ (*.crt, *.cert, *.key) to connect to the registry
+**--cert-dir** _path_ Use certificates at _path_ (\*.crt, \*.cert, \*.key) to connect to the registry
 
 **--tls-verify** _bool-value_ Require HTTPS and verify certificates when talking to container registries (defaults to true)
 
+**--no-creds** _bool-value_ Access the registry anonymously.
+
 ## EXAMPLES
 
 To review information for the image fedora from the docker.io registry:
@@ -76,16 +76,16 @@ Most commands refer to container images, using a _transport_`:`_details_ format.
 | [skopeo-inspect(1)](skopeo-inspect.1.md) | Return low-level information about image-name in a registry. |
 | [skopeo-manifest-digest(1)](skopeo-manifest-digest.1.md) | Compute a manifest digest of manifest-file and write it to standard output.|
 | [skopeo-standalone-sign(1)](skopeo-standalone-sign.1.md) | Sign an image. |
-| [skopeo-standalone-verify(1)](skopeo-standalone-verify.1.md)| Verity an image. |
+| [skopeo-standalone-verify(1)](skopeo-standalone-verify.1.md)| Verify an image. |
 
 ## FILES
 **/etc/containers/policy.json**
 	Default trust policy file, if **--policy** is not specified.
-	The policy format is documented in https://github.com/containers/image/blob/master/docs/policy.json.md .
+	The policy format is documented in https://github.com/containers/image/blob/master/docs/containers-policy.json.5.md .
 
 **/etc/containers/registries.d**
 	Default directory containing registry configuration, if **--registries.d** is not specified.
-	The contents of this directory are documented in https://github.com/containers/image/blob/master/docs/registries.d.md .
+	The contents of this directory are documented in https://github.com/containers/image/blob/master/docs/containers-policy.json.5.md .
 
 ## SEE ALSO
 podman-login(1), docker-login(1)
@@ -662,3 +662,37 @@ func verifyManifestMIMEType(c *check.C, dir string, expectedMIMEType string) {
 	mimeType := manifest.GuessMIMEType(manifestBlob)
 	c.Assert(mimeType, check.Equals, expectedMIMEType)
 }
+
+const regConfFixture = "./fixtures/registries.conf"
+
+func (s *SkopeoSuite) TestSuccessCopySrcWithMirror(c *check.C) {
+	dir, err := ioutil.TempDir("", "copy-mirror")
+	c.Assert(err, check.IsNil)
+
+	assertSkopeoSucceeds(c, "", "--registries-conf="+regConfFixture, "copy",
+		"docker://mirror.invalid/busybox", "dir:"+dir)
+}
+
+func (s *SkopeoSuite) TestFailureCopySrcWithMirrorsUnavailable(c *check.C) {
+	dir, err := ioutil.TempDir("", "copy-mirror")
+	c.Assert(err, check.IsNil)
+
+	assertSkopeoFails(c, ".*no such host.*", "--registries-conf="+regConfFixture, "copy",
+		"docker://invalid.invalid/busybox", "dir:"+dir)
+}
+
+func (s *SkopeoSuite) TestSuccessCopySrcWithMirrorAndPrefix(c *check.C) {
+	dir, err := ioutil.TempDir("", "copy-mirror")
+	c.Assert(err, check.IsNil)
+
+	assertSkopeoSucceeds(c, "", "--registries-conf="+regConfFixture, "copy",
+		"docker://gcr.invalid/foo/bar/busybox", "dir:"+dir)
+}
+
+func (s *SkopeoSuite) TestFailureCopySrcWithMirrorAndPrefixUnavailable(c *check.C) {
+	dir, err := ioutil.TempDir("", "copy-mirror")
+	c.Assert(err, check.IsNil)
+
+	assertSkopeoFails(c, ".*no such host.*", "--registries-conf="+regConfFixture, "copy",
+		"docker://gcr.invalid/wrong/prefix/busybox", "dir:"+dir)
+}
integration/fixtures/registries.conf (new file, 28 lines)
@@ -0,0 +1,28 @@
+[[registry]]
+location = "mirror.invalid"
+mirror = [
+  { location = "mirror-0.invalid" },
+  { location = "mirror-1.invalid" },
+  { location = "gcr.io/google-containers" },
+]
+
+# This entry is currently unused and exists only to ensure
+# that the mirror.invalid/busybox is not rewritten twice.
+[[registry]]
+location = "gcr.io"
+prefix = "gcr.io/google-containers"
+
+[[registry]]
+location = "invalid.invalid"
+mirror = [
+  { location = "invalid-mirror-0.invalid" },
+  { location = "invalid-mirror-1.invalid" },
+]
+
+[[registry]]
+location = "gcr.invalid"
+prefix = "gcr.invalid/foo/bar"
+mirror = [
+  { location = "wrong-mirror-0.invalid" },
+  { location = "gcr.io/google-containers" },
+]
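This fixture drives the mirror tests above: a reference matching a `[[registry]]` entry (narrowed by `prefix` when present) is retried against each mirror in order, with the primary location as the fallback. A toy resolver sketching that candidate order; it is a simplification, and the real logic lives in containers/image's registries.conf support, not in code like this:

```go
package main

import (
	"fmt"
	"strings"
)

// registryEntry approximates the shape of a [[registry]] stanza.
type registryEntry struct {
	Prefix   string // defaults to Location when empty
	Location string
	Mirrors  []string
}

// pullCandidates returns the locations to try, mirrors first, primary last.
func pullCandidates(ref string, entries []registryEntry) []string {
	for _, e := range entries {
		prefix := e.Prefix
		if prefix == "" {
			prefix = e.Location
		}
		if strings.HasPrefix(ref, prefix) {
			rest := strings.TrimPrefix(ref, prefix)
			out := make([]string, 0, len(e.Mirrors)+1)
			for _, m := range e.Mirrors {
				out = append(out, m+rest)
			}
			return append(out, e.Location+rest)
		}
	}
	return []string{ref} // no entry matched; use the reference unmodified
}

func main() {
	entries := []registryEntry{
		{Location: "mirror.invalid", Mirrors: []string{"mirror-0.invalid", "mirror-1.invalid", "gcr.io/google-containers"}},
		{Prefix: "gcr.invalid/foo/bar", Location: "gcr.invalid", Mirrors: []string{"wrong-mirror-0.invalid", "gcr.io/google-containers"}},
	}
	fmt.Println(pullCandidates("mirror.invalid/busybox", entries))      // succeeds via the gcr.io mirror
	fmt.Println(pullCandidates("gcr.invalid/wrong/prefix/busybox", entries)) // no match: "no such host", as the failing test expects
}
```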
@@ -2,13 +2,14 @@
 github.com/urfave/cli v1.20.0
 github.com/kr/pretty v0.1.0
 github.com/kr/text v0.1.0
-github.com/containers/image v1.5
+github.com/containers/image 2c0349c99af7d90694b3faa0e9bde404d407b145
+github.com/containers/buildah 810efa340ab43753034e2ed08ec290e4abab7e72
 github.com/vbauerster/mpb v3.3.4
 github.com/mattn/go-isatty v0.0.4
 github.com/VividCortex/ewma v1.1.1
 golang.org/x/sync 42b317875d0fa942474b76e1b46a6060d720ae6e
 github.com/opencontainers/go-digest c9281466c8b2f606084ac71339773efd177436e7
-github.com/containers/storage v1.11
+github.com/containers/storage v1.12.3
 github.com/sirupsen/logrus v1.0.0
 github.com/go-check/check v1
 github.com/stretchr/testify v1.1.3
@@ -17,7 +18,7 @@ github.com/pmezard/go-difflib 5d4384ee4fb2527b0a1256a821ebfc92f91efefc
 github.com/pkg/errors v0.8.1
 golang.org/x/crypto a4c6cb3142f211c99e4bf4cd769535b29a9b616f
 github.com/ulikunitz/xz v0.5.4
-github.com/boltdb/bolt v1.3.1
+github.com/etcd-io/bbolt v1.3.2
 # docker deps from https://github.com/docker/docker/blob/v1.11.2/hack/vendor.sh
 github.com/docker/docker da99009bbb1165d1ac5688b5c81d2f589d418341
 github.com/docker/go-connections 7beb39f0b969b075d1325fecb092faf27fd357b6
@@ -58,7 +59,7 @@ github.com/pborman/uuid v1.0
 github.com/opencontainers/selinux v1.1
 golang.org/x/sys 43e60d72a8e2bd92ee98319ba9a384a0e9837c08
 github.com/tchap/go-patricia v2.2.6
-github.com/BurntSushi/toml v0.2.0
+github.com/BurntSushi/toml v0.3.1
 github.com/pquerna/ffjson d49c2bc1aa135aad0c6f4fc2056623ec78f5d5ac
 github.com/syndtr/gocapability d98352740cb2c55f81556b63d4a1ec64c5a319c2
 github.com/klauspost/pgzip v1.2.1
vendor/github.com/BurntSushi/toml/COPYING (generated, vendored; 27 changed lines)
@@ -1,14 +1,21 @@
-            DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
-                    Version 2, December 2004
+The MIT License (MIT)
 
- Copyright (C) 2004 Sam Hocevar <sam@hocevar.net>
+Copyright (c) 2013 TOML authors
 
- Everyone is permitted to copy and distribute verbatim or modified
- copies of this license document, and changing it is allowed as long
- as the name is changed.
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
 
-            DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
-   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
 
- 0. You just DO WHAT THE FUCK YOU WANT TO.
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
vendor/github.com/BurntSushi/toml/README.md (generated, vendored; 18 changed lines)
@@ -1,17 +1,17 @@
 ## TOML parser and encoder for Go with reflection
 
 TOML stands for Tom's Obvious, Minimal Language. This Go package provides a
-reflection interface similar to Go's standard library `json` and `xml` 
+reflection interface similar to Go's standard library `json` and `xml`
 packages. This package also supports the `encoding.TextUnmarshaler` and
-`encoding.TextMarshaler` interfaces so that you can define custom data 
+`encoding.TextMarshaler` interfaces so that you can define custom data
 representations. (There is an example of this below.)
 
-Spec: https://github.com/mojombo/toml
+Spec: https://github.com/toml-lang/toml
 
 Compatible with TOML version
-[v0.2.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.2.0.md)
+[v0.4.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md)
 
-Documentation: http://godoc.org/github.com/BurntSushi/toml
+Documentation: https://godoc.org/github.com/BurntSushi/toml
 
 Installation:
 
@@ -26,8 +26,7 @@ go get github.com/BurntSushi/toml/cmd/tomlv
 tomlv some-toml-file.toml
 ```
 
-[![Build status](https://api.travis-ci.org/BurntSushi/toml.png)](https://travis-ci.org/BurntSushi/toml)
-
+[![Build Status](https://travis-ci.org/BurntSushi/toml.svg?branch=master)](https://travis-ci.org/BurntSushi/toml) [![GoDoc](https://godoc.org/github.com/BurntSushi/toml?status.svg)](https://godoc.org/github.com/BurntSushi/toml)
 
 ### Testing
 
@@ -87,7 +86,7 @@ type TOML struct {
 
 ### Using the `encoding.TextUnmarshaler` interface
 
-Here's an example that automatically parses duration strings into 
+Here's an example that automatically parses duration strings into
 `time.Duration` values:
 
 ```toml
@@ -120,7 +119,7 @@ for _, s := range favorites.Song {
 }
 ```
 
-And you'll also need a `duration` type that satisfies the 
+And you'll also need a `duration` type that satisfies the
 `encoding.TextUnmarshaler` interface:
 
 ```go
@@ -217,4 +216,3 @@ Note that a case insensitive match will be tried if an exact match can't be
 found.
 
 A working example of the above can be found in `_examples/example.{go,toml}`.
-
vendor/github.com/BurntSushi/toml/decode.go (generated, vendored; 48 changed lines)
@@ -10,7 +10,9 @@ import (
 	"time"
 )
 
-var e = fmt.Errorf
+func e(format string, args ...interface{}) error {
+	return fmt.Errorf("toml: "+format, args...)
+}
 
 // Unmarshaler is the interface implemented by objects that can unmarshal a
 // TOML description of themselves.
@@ -103,6 +105,13 @@ func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error {
 // This decoder will not handle cyclic types. If a cyclic type is passed,
 // `Decode` will not terminate.
 func Decode(data string, v interface{}) (MetaData, error) {
+	rv := reflect.ValueOf(v)
+	if rv.Kind() != reflect.Ptr {
+		return MetaData{}, e("Decode of non-pointer %s", reflect.TypeOf(v))
+	}
+	if rv.IsNil() {
+		return MetaData{}, e("Decode of nil %s", reflect.TypeOf(v))
+	}
 	p, err := parse(data)
 	if err != nil {
 		return MetaData{}, err
@@ -111,7 +120,7 @@ func Decode(data string, v interface{}) (MetaData, error) {
 		p.mapping, p.types, p.ordered,
 		make(map[string]bool, len(p.ordered)), nil,
 	}
-	return md, md.unify(p.mapping, rvalue(v))
+	return md, md.unify(p.mapping, indirect(rv))
 }
 
 // DecodeFile is just like Decode, except it will automatically read the
@@ -211,7 +220,7 @@ func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
 	case reflect.Interface:
 		// we only support empty interfaces.
 		if rv.NumMethod() > 0 {
-			return e("Unsupported type '%s'.", rv.Kind())
+			return e("unsupported type %s", rv.Type())
 		}
 		return md.unifyAnything(data, rv)
 	case reflect.Float32:
@@ -219,7 +228,7 @@ func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
 	case reflect.Float64:
 		return md.unifyFloat64(data, rv)
 	}
-	return e("Unsupported type '%s'.", rv.Kind())
+	return e("unsupported type %s", rv.Kind())
 }
 
 func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
@@ -228,7 +237,8 @@ func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
 		if mapping == nil {
 			return nil
 		}
-		return mismatch(rv, "map", mapping)
+		return e("type mismatch for %s: expected table but found %T",
+			rv.Type().String(), mapping)
 	}
 
 	for key, datum := range tmap {
@@ -253,14 +263,13 @@ func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
 			md.decoded[md.context.add(key).String()] = true
 			md.context = append(md.context, key)
 			if err := md.unify(datum, subv); err != nil {
-				return e("Type mismatch for '%s.%s': %s",
-					rv.Type().String(), f.name, err)
+				return err
 			}
 			md.context = md.context[0 : len(md.context)-1]
 		} else if f.name != "" {
 			// Bad user! No soup for you!
-			return e("Field '%s.%s' is unexported, and therefore cannot "+
-				"be loaded with reflection.", rv.Type().String(), f.name)
+			return e("cannot write unexported field %s.%s",
+				rv.Type().String(), f.name)
 		}
 	}
 }
@@ -378,15 +387,15 @@ func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error {
 			// No bounds checking necessary.
 		case reflect.Int8:
 			if num < math.MinInt8 || num > math.MaxInt8 {
-				return e("Value '%d' is out of range for int8.", num)
+				return e("value %d is out of range for int8", num)
 			}
 		case reflect.Int16:
 			if num < math.MinInt16 || num > math.MaxInt16 {
-				return e("Value '%d' is out of range for int16.", num)
+				return e("value %d is out of range for int16", num)
 			}
 		case reflect.Int32:
 			if num < math.MinInt32 || num > math.MaxInt32 {
-				return e("Value '%d' is out of range for int32.", num)
+				return e("value %d is out of range for int32", num)
 			}
 		}
 		rv.SetInt(num)
@@ -397,15 +406,15 @@ func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error {
 			// No bounds checking necessary.
 		case reflect.Uint8:
 			if num < 0 || unum > math.MaxUint8 {
-				return e("Value '%d' is out of range for uint8.", num)
+				return e("value %d is out of range for uint8", num)
 			}
 		case reflect.Uint16:
 			if num < 0 || unum > math.MaxUint16 {
-				return e("Value '%d' is out of range for uint16.", num)
+				return e("value %d is out of range for uint16", num)
 			}
 		case reflect.Uint32:
 			if num < 0 || unum > math.MaxUint32 {
-				return e("Value '%d' is out of range for uint32.", num)
+				return e("value %d is out of range for uint32", num)
 			}
 		}
 		rv.SetUint(unum)
@@ -471,7 +480,7 @@ func rvalue(v interface{}) reflect.Value {
 // interest to us (like encoding.TextUnmarshaler).
 func indirect(v reflect.Value) reflect.Value {
 	if v.Kind() != reflect.Ptr {
-		if v.CanAddr() {
+		if v.CanSet() {
 			pv := v.Addr()
 			if _, ok := pv.Interface().(TextUnmarshaler); ok {
 				return pv
@@ -496,10 +505,5 @@ func isUnifiable(rv reflect.Value) bool {
 }
 
 func badtype(expected string, data interface{}) error {
-	return e("Expected %s but found '%T'.", expected, data)
-}
-
-func mismatch(user reflect.Value, expected string, data interface{}) error {
-	return e("Type mismatch for %s. Expected %s but found '%T'.",
-		user.Type().String(), expected, data)
+	return e("cannot load TOML value of type %T into a Go %s", data, expected)
 }
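Two behavior changes above are easy to demonstrate: `Decode` now rejects non-pointer and nil targets up front, and every error carries a `toml:` prefix via the new `e` helper. A small program against BurntSushi/toml v0.3.1 as vendored here:

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	var cfg struct{ Name string }

	// Passing a non-pointer now fails fast, with the new "toml:" prefix.
	if _, err := toml.Decode(`name = "x"`, cfg); err != nil {
		fmt.Println(err) // toml: Decode of non-pointer struct { Name string }
	}

	// Passing a pointer works as before.
	if _, err := toml.Decode(`name = "x"`, &cfg); err == nil {
		fmt.Println("decoded:", cfg.Name) // decoded: x
	}
}
```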
vendor/github.com/BurntSushi/toml/decode_meta.go (generated, vendored; 3 changed lines)
@@ -77,9 +77,8 @@ func (k Key) maybeQuoted(i int) string {
 	}
 	if quote {
 		return "\"" + strings.Replace(k[i], "\"", "\\\"", -1) + "\""
-	} else {
-		return k[i]
 	}
+	return k[i]
 }
 
 func (k Key) add(piece string) Key {
vendor/github.com/BurntSushi/toml/doc.go (generated, vendored; 2 changed lines)
@@ -4,7 +4,7 @@ files via reflection. There is also support for delaying decoding with
 the Primitive type, and querying the set of keys in a TOML document with the
 MetaData type.
 
-The specification implemented: https://github.com/mojombo/toml
+The specification implemented: https://github.com/toml-lang/toml
 
 The sub-command github.com/BurntSushi/toml/cmd/tomlv can be used to verify
 whether a file is a valid TOML document. It can also be used to print the
vendor/github.com/BurntSushi/toml/encode.go (generated, vendored; 81 changed lines)
@@ -16,17 +16,17 @@ type tomlEncodeError struct{ error }
 
 var (
 	errArrayMixedElementTypes = errors.New(
-		"can't encode array with mixed element types")
+		"toml: cannot encode array with mixed element types")
 	errArrayNilElement = errors.New(
-		"can't encode array with nil element")
+		"toml: cannot encode array with nil element")
 	errNonString = errors.New(
-		"can't encode a map with non-string key type")
+		"toml: cannot encode a map with non-string key type")
 	errAnonNonStruct = errors.New(
-		"can't encode an anonymous field that is not a struct")
+		"toml: cannot encode an anonymous field that is not a struct")
 	errArrayNoTable = errors.New(
-		"TOML array element can't contain a table")
+		"toml: TOML array element cannot contain a table")
 	errNoKey = errors.New(
-		"top-level values must be a Go map or struct")
+		"toml: top-level values must be Go maps or structs")
 	errAnything = errors.New("") // used in testing
 )
 
@@ -148,7 +148,7 @@ func (enc *Encoder) encode(key Key, rv reflect.Value) {
 	case reflect.Struct:
 		enc.eTable(key, rv)
 	default:
-		panic(e("Unsupported type for key '%s': %s", key, k))
+		panic(e("unsupported type for key '%s': %s", key, k))
 	}
 }
 
@@ -160,7 +160,7 @@ func (enc *Encoder) eElement(rv reflect.Value) {
 		// Special case time.Time as a primitive. Has to come before
 		// TextMarshaler below because time.Time implements
 		// encoding.TextMarshaler, but we need to always use UTC.
-		enc.wf(v.In(time.FixedZone("UTC", 0)).Format("2006-01-02T15:04:05Z"))
+		enc.wf(v.UTC().Format("2006-01-02T15:04:05Z"))
 		return
 	case TextMarshaler:
 		// Special case. Use text marshaler if it's available for this value.
@@ -191,7 +191,7 @@ func (enc *Encoder) eElement(rv reflect.Value) {
 	case reflect.String:
 		enc.writeQuoted(rv.String())
 	default:
-		panic(e("Unexpected primitive type: %s", rv.Kind()))
+		panic(e("unexpected primitive type: %s", rv.Kind()))
 	}
 }
 
@@ -241,7 +241,7 @@ func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) {
 func (enc *Encoder) eTable(key Key, rv reflect.Value) {
 	panicIfInvalidKey(key)
 	if len(key) == 1 {
-		// Output an extra new line between top-level tables.
+		// Output an extra newline between top-level tables.
 		// (The newline isn't written if nothing else has been written though.)
 		enc.newline()
 	}
@@ -315,10 +315,16 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value) {
 			t := f.Type
 			switch t.Kind() {
 			case reflect.Struct:
-				addFields(t, frv, f.Index)
-				continue
+				// Treat anonymous struct fields with
+				// tag names as though they are not
+				// anonymous, like encoding/json does.
+				if getOptions(f.Tag).name == "" {
+					addFields(t, frv, f.Index)
+					continue
+				}
 			case reflect.Ptr:
-				if t.Elem().Kind() == reflect.Struct {
+				if t.Elem().Kind() == reflect.Struct &&
+					getOptions(f.Tag).name == "" {
 					if !frv.IsNil() {
 						addFields(t.Elem(), frv.Elem(), f.Index)
 					}
@@ -347,17 +353,18 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value) {
 			continue
 		}
 
-		tag := sft.Tag.Get("toml")
-		if tag == "-" {
+		opts := getOptions(sft.Tag)
+		if opts.skip {
 			continue
 		}
-		keyName, opts := getOptions(tag)
-		if keyName == "" {
-			keyName = sft.Name
+		keyName := sft.Name
+		if opts.name != "" {
+			keyName = opts.name
 		}
-		if _, ok := opts["omitempty"]; ok && isEmpty(sf) {
+		if opts.omitempty && isEmpty(sf) {
 			continue
-		} else if _, ok := opts["omitzero"]; ok && isZero(sf) {
+		}
+		if opts.omitzero && isZero(sf) {
 			continue
 		}
 
@@ -392,9 +399,8 @@ func tomlTypeOfGo(rv reflect.Value) tomlType {
 	case reflect.Array, reflect.Slice:
 		if typeEqual(tomlHash, tomlArrayType(rv)) {
 			return tomlArrayHash
-		} else {
-			return tomlArray
 		}
+		return tomlArray
 	case reflect.Ptr, reflect.Interface:
 		return tomlTypeOfGo(rv.Elem())
 	case reflect.String:
@@ -451,17 +457,30 @@ func tomlArrayType(rv reflect.Value) tomlType {
 	return firstType
 }
 
-func getOptions(keyName string) (string, map[string]struct{}) {
-	opts := make(map[string]struct{})
-	ss := strings.Split(keyName, ",")
-	name := ss[0]
-	if len(ss) > 1 {
-		for _, opt := range ss {
-			opts[opt] = struct{}{}
-		}
+type tagOptions struct {
+	skip      bool // "-"
+	name      string
+	omitempty bool
+	omitzero  bool
+}
+
+func getOptions(tag reflect.StructTag) tagOptions {
+	t := tag.Get("toml")
+	if t == "-" {
+		return tagOptions{skip: true}
+	}
+	var opts tagOptions
+	parts := strings.Split(t, ",")
+	opts.name = parts[0]
+	for _, s := range parts[1:] {
+		switch s {
+		case "omitempty":
+			opts.omitempty = true
+		case "omitzero":
+			opts.omitzero = true
+		}
 	}
-	return name, opts
+	return opts
 }
 
 func isZero(rv reflect.Value) bool {
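The new `tagOptions` parser gives the encoder the same struct-tag vocabulary as `encoding/json`: a rename, `omitempty`, `omitzero`, and `-` to skip a field. A short program exercising all four against the vendored package:

```go
package main

import (
	"os"

	"github.com/BurntSushi/toml"
)

// server exercises each tag option: a rename, omitzero, omitempty, and skip.
type server struct {
	Name   string `toml:"name"`
	Port   int    `toml:"port,omitzero"`   // dropped while 0
	Notes  string `toml:"notes,omitempty"` // dropped while empty
	Secret string `toml:"-"`               // never encoded
}

func main() {
	s := server{Name: "demo", Secret: "hunter2"}
	if err := toml.NewEncoder(os.Stdout).Encode(s); err != nil {
		panic(err)
	}
	// Prints only: name = "demo"
}
```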
vendor/github.com/BurntSushi/toml/lex.go (generated, vendored; 496 changed lines)
@@ -3,6 +3,7 @@ package toml
 import (
 	"fmt"
 	"strings"
+	"unicode"
 	"unicode/utf8"
 )
 
@@ -29,24 +30,28 @@ const (
 	itemArrayTableEnd
 	itemKeyStart
 	itemCommentStart
+	itemInlineTableStart
+	itemInlineTableEnd
 )
 
 const (
-	eof             = 0
-	tableStart      = '['
-	tableEnd        = ']'
-	arrayTableStart = '['
-	arrayTableEnd   = ']'
-	tableSep        = '.'
-	keySep          = '='
-	arrayStart      = '['
-	arrayEnd        = ']'
-	arrayValTerm    = ','
-	commentStart    = '#'
-	stringStart     = '"'
-	stringEnd       = '"'
-	rawStringStart  = '\''
-	rawStringEnd    = '\''
+	eof              = 0
+	comma            = ','
+	tableStart       = '['
+	tableEnd         = ']'
+	arrayTableStart  = '['
+	arrayTableEnd    = ']'
+	tableSep         = '.'
+	keySep           = '='
+	arrayStart       = '['
+	arrayEnd         = ']'
+	commentStart     = '#'
+	stringStart      = '"'
+	stringEnd        = '"'
+	rawStringStart   = '\''
+	rawStringEnd     = '\''
+	inlineTableStart = '{'
+	inlineTableEnd   = '}'
 )
 
 type stateFn func(lx *lexer) stateFn
@@ -55,11 +60,18 @@ type lexer struct {
 	input string
 	start int
 	pos   int
-	width int
 	line  int
 	state stateFn
 	items chan item
 
+	// Allow for backing up up to three runes.
+	// This is necessary because TOML contains 3-rune tokens (""" and ''').
+	prevWidths [3]int
+	nprev      int // how many of prevWidths are in use
+	// If we emit an eof, we can still back up, but it is not OK to call
+	// next again.
+	atEOF bool
+
 	// A stack of state functions used to maintain context.
 	// The idea is to reuse parts of the state machine in various places.
 	// For example, values can appear at the top level or within arbitrarily
@@ -87,7 +99,7 @@ func (lx *lexer) nextItem() item {
 
 func lex(input string) *lexer {
 	lx := &lexer{
-		input: input + "\n",
+		input: input,
 		state: lexTop,
 		line:  1,
 		items: make(chan item, 10),
@@ -102,7 +114,7 @@ func (lx *lexer) push(state stateFn) {
 
 func (lx *lexer) pop() stateFn {
 	if len(lx.stack) == 0 {
-		return lx.errorf("BUG in lexer: no states to pop.")
+		return lx.errorf("BUG in lexer: no states to pop")
 	}
 	last := lx.stack[len(lx.stack)-1]
 	lx.stack = lx.stack[0 : len(lx.stack)-1]
@@ -124,16 +136,25 @@ func (lx *lexer) emitTrim(typ itemType) {
 }
 
 func (lx *lexer) next() (r rune) {
+	if lx.atEOF {
+		panic("next called after EOF")
+	}
 	if lx.pos >= len(lx.input) {
-		lx.width = 0
+		lx.atEOF = true
 		return eof
 	}
 
 	if lx.input[lx.pos] == '\n' {
 		lx.line++
 	}
-	r, lx.width = utf8.DecodeRuneInString(lx.input[lx.pos:])
-	lx.pos += lx.width
+	lx.prevWidths[2] = lx.prevWidths[1]
+	lx.prevWidths[1] = lx.prevWidths[0]
+	if lx.nprev < 3 {
+		lx.nprev++
+	}
+	r, w := utf8.DecodeRuneInString(lx.input[lx.pos:])
+	lx.prevWidths[0] = w
+	lx.pos += w
 	return r
 }
 
@@ -142,9 +163,20 @@ func (lx *lexer) ignore() {
 	lx.start = lx.pos
 }
 
-// backup steps back one rune. Can be called only once per call of next.
+// backup steps back one rune. Can be called only twice between calls to next.
 func (lx *lexer) backup() {
-	lx.pos -= lx.width
+	if lx.atEOF {
+		lx.atEOF = false
+		return
+	}
+	if lx.nprev < 1 {
+		panic("backed up too far")
+	}
+	w := lx.prevWidths[0]
+	lx.prevWidths[0] = lx.prevWidths[1]
+	lx.prevWidths[1] = lx.prevWidths[2]
+	lx.nprev--
+	lx.pos -= w
 	if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' {
 		lx.line--
 	}
@@ -166,9 +198,22 @@ func (lx *lexer) peek() rune {
 	return r
 }
 
+// skip ignores all input that matches the given predicate.
+func (lx *lexer) skip(pred func(rune) bool) {
+	for {
+		r := lx.next()
+		if pred(r) {
+			continue
+		}
+		lx.backup()
+		lx.ignore()
+		return
+	}
+}
+
 // errorf stops all lexing by emitting an error and returning `nil`.
 // Note that any value that is a character is escaped if it's a special
-// character (new lines, tabs, etc.).
+// character (newlines, tabs, etc.).
 func (lx *lexer) errorf(format string, values ...interface{}) stateFn {
 	lx.items <- item{
 		itemError,
@@ -184,7 +229,6 @@ func lexTop(lx *lexer) stateFn {
 	if isWhitespace(r) || isNL(r) {
 		return lexSkip(lx, lexTop)
 	}
-
 	switch r {
 	case commentStart:
 		lx.push(lexTop)
@@ -193,7 +237,7 @@ func lexTop(lx *lexer) stateFn {
 		return lexTableStart
 	case eof:
 		if lx.pos > lx.start {
-			return lx.errorf("Unexpected EOF.")
+			return lx.errorf("unexpected EOF")
 		}
 		lx.emit(itemEOF)
 		return nil
@@ -208,12 +252,12 @@ func lexTop(lx *lexer) stateFn {
 
 // lexTopEnd is entered whenever a top-level item has been consumed. (A value
 // or a table.) It must see only whitespace, and will turn back to lexTop
-// upon a new line. If it sees EOF, it will quit the lexer successfully.
+// upon a newline. If it sees EOF, it will quit the lexer successfully.
 func lexTopEnd(lx *lexer) stateFn {
 	r := lx.next()
 	switch {
 	case r == commentStart:
-		// a comment will read to a new line for us.
+		// a comment will read to a newline for us.
 		lx.push(lexTop)
 		return lexCommentStart
 	case isWhitespace(r):
@@ -222,11 +266,11 @@ func lexTopEnd(lx *lexer) stateFn {
 		lx.ignore()
 		return lexTop
 	case r == eof:
-		lx.ignore()
-		return lexTop
+		lx.emit(itemEOF)
+		return nil
 	}
-	return lx.errorf("Expected a top-level item to end with a new line, "+
-		"comment or EOF, but got %q instead.", r)
+	return lx.errorf("expected a top-level item to end with a newline, "+
+		"comment, or EOF, but got %q instead", r)
}
 
 // lexTable lexes the beginning of a table. Namely, it makes sure that
@@ -253,21 +297,22 @@ func lexTableEnd(lx *lexer) stateFn {
 
 func lexArrayTableEnd(lx *lexer) stateFn {
 	if r := lx.next(); r != arrayTableEnd {
-		return lx.errorf("Expected end of table array name delimiter %q, "+
-			"but got %q instead.", arrayTableEnd, r)
+		return lx.errorf("expected end of table array name delimiter %q, "+
+			"but got %q instead", arrayTableEnd, r)
 	}
 	lx.emit(itemArrayTableEnd)
 	return lexTopEnd
 }
 
 func lexTableNameStart(lx *lexer) stateFn {
+	lx.skip(isWhitespace)
 	switch r := lx.peek(); {
 	case r == tableEnd || r == eof:
-		return lx.errorf("Unexpected end of table name. (Table names cannot " +
-			"be empty.)")
+		return lx.errorf("unexpected end of table name " +
+			"(table names cannot be empty)")
 	case r == tableSep:
-		return lx.errorf("Unexpected table separator. (Table names cannot " +
-			"be empty.)")
+		return lx.errorf("unexpected table separator " +
+			"(table names cannot be empty)")
 	case r == stringStart || r == rawStringStart:
 		lx.ignore()
 		lx.push(lexTableNameEnd)
@@ -277,24 +322,22 @@ func lexTableNameStart(lx *lexer) stateFn {
 	}
 }
 
-// lexTableName lexes the name of a table. It assumes that at least one
+// lexBareTableName lexes the name of a table. It assumes that at least one
 // valid character for the table has already been read.
 func lexBareTableName(lx *lexer) stateFn {
-	switch r := lx.next(); {
-	case isBareKeyChar(r):
+	r := lx.next()
+	if isBareKeyChar(r) {
 		return lexBareTableName
-	case r == tableSep || r == tableEnd:
-		lx.backup()
-		lx.emitTrim(itemText)
-		return lexTableNameEnd
-	default:
-		return lx.errorf("Bare keys cannot contain %q.", r)
 	}
+	lx.backup()
+	lx.emit(itemText)
+	return lexTableNameEnd
 }
 
 // lexTableNameEnd reads the end of a piece of a table name, optionally
 // consuming whitespace.
 func lexTableNameEnd(lx *lexer) stateFn {
+	lx.skip(isWhitespace)
 	switch r := lx.next(); {
 	case isWhitespace(r):
 		return lexTableNameEnd
@@ -304,8 +347,8 @@ func lexTableNameEnd(lx *lexer) stateFn {
 	case r == tableEnd:
 		return lx.pop()
 	default:
-		return lx.errorf("Expected '.' or ']' to end table name, but got %q "+
-			"instead.", r)
+		return lx.errorf("expected '.' or ']' to end table name, "+
+			"but got %q instead", r)
 	}
 }
 
@@ -315,7 +358,7 @@ func lexKeyStart(lx *lexer) stateFn {
 	r := lx.peek()
 	switch {
 	case r == keySep:
-		return lx.errorf("Unexpected key separator %q.", keySep)
+		return lx.errorf("unexpected key separator %q", keySep)
 	case isWhitespace(r) || isNL(r):
 		lx.next()
 		return lexSkip(lx, lexKeyStart)
@@ -338,14 +381,15 @@ func lexBareKey(lx *lexer) stateFn {
 	case isBareKeyChar(r):
 		return lexBareKey
 	case isWhitespace(r):
-		lx.emitTrim(itemText)
+		lx.backup()
+		lx.emit(itemText)
 		return lexKeyEnd
 	case r == keySep:
 		lx.backup()
-		lx.emitTrim(itemText)
+		lx.emit(itemText)
 		return lexKeyEnd
 	default:
-		return lx.errorf("Bare keys cannot contain %q.", r)
+		return lx.errorf("bare keys cannot contain %q", r)
 	}
 }
 
@@ -358,7 +402,7 @@ func lexKeyEnd(lx *lexer) stateFn {
 	case isWhitespace(r):
 		return lexSkip(lx, lexKeyEnd)
 	default:
-		return lx.errorf("Expected key separator %q, but got %q instead.",
+		return lx.errorf("expected key separator %q, but got %q instead",
 			keySep, r)
 	}
 }
@@ -367,20 +411,26 @@ func lexKeyEnd(lx *lexer) stateFn {
 // lexValue will ignore whitespace.
 // After a value is lexed, the last state on the next is popped and returned.
 func lexValue(lx *lexer) stateFn {
-	// We allow whitespace to precede a value, but NOT new lines.
-	// In array syntax, the array states are responsible for ignoring new
-	// lines.
+	// We allow whitespace to precede a value, but NOT newlines.
+	// In array syntax, the array states are responsible for ignoring newlines.
 	r := lx.next()
-	if isWhitespace(r) {
-		return lexSkip(lx, lexValue)
-	}
-
 	switch {
-	case r == arrayStart:
+	case isWhitespace(r):
+		return lexSkip(lx, lexValue)
+	case isDigit(r):
+		lx.backup() // avoid an extra state and use the same as above
+		return lexNumberOrDateStart
+	}
+	switch r {
+	case arrayStart:
 		lx.ignore()
 		lx.emit(itemArray)
 		return lexArrayValue
-	case r == stringStart:
+	case inlineTableStart:
+		lx.ignore()
+		lx.emit(itemInlineTableStart)
+		return lexInlineTableValue
+	case stringStart:
 		if lx.accept(stringStart) {
 			if lx.accept(stringStart) {
 				lx.ignore() // Ignore """
@@ -390,7 +440,7 @@ func lexValue(lx *lexer) stateFn {
 		}
 		lx.ignore() // ignore the '"'
 		return lexString
-	case r == rawStringStart:
+	case rawStringStart:
 		if lx.accept(rawStringStart) {
 			if lx.accept(rawStringStart) {
 				lx.ignore() // Ignore """
@@ -400,23 +450,24 @@ func lexValue(lx *lexer) stateFn {
 		}
 		lx.ignore() // ignore the "'"
 		return lexRawString
-	case r == 't':
-		return lexTrue
-	case r == 'f':
-		return lexFalse
-	case r == '-':
+	case '+', '-':
 		return lexNumberStart
-	case isDigit(r):
-		lx.backup() // avoid an extra state and use the same as above
-		return lexNumberOrDateStart
-	case r == '.': // special error case, be kind to users
-		return lx.errorf("Floats must start with a digit, not '.'.")
+	case '.': // special error case, be kind to users
+		return lx.errorf("floats must start with a digit, not '.'")
 	}
-	return lx.errorf("Expected value but found %q instead.", r)
+	if unicode.IsLetter(r) {
+		// Be permissive here; lexBool will give a nice error if the
+		// user wrote something like
+		//   x = foo
+		// (i.e. not 'true' or 'false' but is something else word-like.)
+		lx.backup()
+		return lexBool
+	}
+	return lx.errorf("expected value but found %q instead", r)
 }
 
 // lexArrayValue consumes one value in an array. It assumes that '[' or ','
-// have already been consumed. All whitespace and new lines are ignored.
+// have already been consumed. All whitespace and newlines are ignored.
 func lexArrayValue(lx *lexer) stateFn {
 	r := lx.next()
 	switch {
@@ -425,10 +476,11 @@ func lexArrayValue(lx *lexer) stateFn {
 	case r == commentStart:
 		lx.push(lexArrayValue)
 		return lexCommentStart
-	case r == arrayValTerm:
-		return lx.errorf("Unexpected array value terminator %q.",
-			arrayValTerm)
+	case r == comma:
+		return lx.errorf("unexpected comma")
 	case r == arrayEnd:
 		// NOTE(caleb): The spec isn't clear about whether you can have
 		// a trailing comma or not, so we'll allow it.
|
||||
return lexArrayEnd
|
||||
}
|
||||
|
||||
@@ -437,8 +489,9 @@ func lexArrayValue(lx *lexer) stateFn {
|
||||
return lexValue
|
||||
}
|
||||
|
||||
// lexArrayValueEnd consumes the cruft between values of an array. Namely,
|
||||
// it ignores whitespace and expects either a ',' or a ']'.
|
||||
// lexArrayValueEnd consumes everything between the end of an array value and
|
||||
// the next value (or the end of the array): it ignores whitespace and newlines
|
||||
// and expects either a ',' or a ']'.
|
||||
func lexArrayValueEnd(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
switch {
|
||||
@@ -447,31 +500,88 @@ func lexArrayValueEnd(lx *lexer) stateFn {
|
||||
case r == commentStart:
|
||||
lx.push(lexArrayValueEnd)
|
||||
return lexCommentStart
|
||||
case r == arrayValTerm:
|
||||
case r == comma:
|
||||
lx.ignore()
|
||||
return lexArrayValue // move on to the next value
|
||||
case r == arrayEnd:
|
||||
return lexArrayEnd
|
||||
}
|
||||
return lx.errorf("Expected an array value terminator %q or an array "+
|
||||
"terminator %q, but got %q instead.", arrayValTerm, arrayEnd, r)
|
||||
return lx.errorf(
|
||||
"expected a comma or array terminator %q, but got %q instead",
|
||||
arrayEnd, r,
|
||||
)
|
||||
}
|
||||
|
||||
// lexArrayEnd finishes the lexing of an array. It assumes that a ']' has
|
||||
// just been consumed.
|
||||
// lexArrayEnd finishes the lexing of an array.
|
||||
// It assumes that a ']' has just been consumed.
|
||||
func lexArrayEnd(lx *lexer) stateFn {
|
||||
lx.ignore()
|
||||
lx.emit(itemArrayEnd)
|
||||
return lx.pop()
|
||||
}
|
||||
|
||||
// lexInlineTableValue consumes one key/value pair in an inline table.
|
||||
// It assumes that '{' or ',' have already been consumed. Whitespace is ignored.
|
||||
func lexInlineTableValue(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
switch {
|
||||
case isWhitespace(r):
|
||||
return lexSkip(lx, lexInlineTableValue)
|
||||
case isNL(r):
|
||||
return lx.errorf("newlines not allowed within inline tables")
|
||||
case r == commentStart:
|
||||
lx.push(lexInlineTableValue)
|
||||
return lexCommentStart
|
||||
case r == comma:
|
||||
return lx.errorf("unexpected comma")
|
||||
case r == inlineTableEnd:
|
||||
return lexInlineTableEnd
|
||||
}
|
||||
lx.backup()
|
||||
lx.push(lexInlineTableValueEnd)
|
||||
return lexKeyStart
|
||||
}
|
||||
|
||||
// lexInlineTableValueEnd consumes everything between the end of an inline table
|
||||
// key/value pair and the next pair (or the end of the table):
|
||||
// it ignores whitespace and expects either a ',' or a '}'.
|
||||
func lexInlineTableValueEnd(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
switch {
|
||||
case isWhitespace(r):
|
||||
return lexSkip(lx, lexInlineTableValueEnd)
|
||||
case isNL(r):
|
||||
return lx.errorf("newlines not allowed within inline tables")
|
||||
case r == commentStart:
|
||||
lx.push(lexInlineTableValueEnd)
|
||||
return lexCommentStart
|
||||
case r == comma:
|
||||
lx.ignore()
|
||||
return lexInlineTableValue
|
||||
case r == inlineTableEnd:
|
||||
return lexInlineTableEnd
|
||||
}
|
||||
return lx.errorf("expected a comma or an inline table terminator %q, "+
|
||||
"but got %q instead", inlineTableEnd, r)
|
||||
}
|
||||
|
||||
// lexInlineTableEnd finishes the lexing of an inline table.
|
||||
// It assumes that a '}' has just been consumed.
|
||||
func lexInlineTableEnd(lx *lexer) stateFn {
|
||||
lx.ignore()
|
||||
lx.emit(itemInlineTableEnd)
|
||||
return lx.pop()
|
||||
}
|
||||
|
||||
// lexString consumes the inner contents of a string. It assumes that the
|
||||
// beginning '"' has already been consumed and ignored.
|
||||
func lexString(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
switch {
|
||||
case r == eof:
|
||||
return lx.errorf("unexpected EOF")
|
||||
case isNL(r):
|
||||
return lx.errorf("Strings cannot contain new lines.")
|
||||
return lx.errorf("strings cannot contain newlines")
|
||||
case r == '\\':
|
||||
lx.push(lexString)
|
||||
return lexStringEscape
|
||||
@@ -488,11 +598,12 @@ func lexString(lx *lexer) stateFn {
|
||||
// lexMultilineString consumes the inner contents of a string. It assumes that
|
||||
// the beginning '"""' has already been consumed and ignored.
|
||||
func lexMultilineString(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
switch {
|
||||
case r == '\\':
|
||||
switch lx.next() {
|
||||
case eof:
|
||||
return lx.errorf("unexpected EOF")
|
||||
case '\\':
|
||||
return lexMultilineStringEscape
|
||||
case r == stringEnd:
|
||||
case stringEnd:
|
||||
if lx.accept(stringEnd) {
|
||||
if lx.accept(stringEnd) {
|
||||
lx.backup()
|
||||
@@ -516,8 +627,10 @@ func lexMultilineString(lx *lexer) stateFn {
|
||||
func lexRawString(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
switch {
|
||||
case r == eof:
|
||||
return lx.errorf("unexpected EOF")
|
||||
case isNL(r):
|
||||
return lx.errorf("Strings cannot contain new lines.")
|
||||
return lx.errorf("strings cannot contain newlines")
|
||||
case r == rawStringEnd:
|
||||
lx.backup()
|
||||
lx.emit(itemRawString)
|
||||
@@ -529,12 +642,13 @@ func lexRawString(lx *lexer) stateFn {
|
||||
}
|
||||
|
||||
// lexMultilineRawString consumes a raw string. Nothing can be escaped in such
|
||||
// a string. It assumes that the beginning "'" has already been consumed and
|
||||
// a string. It assumes that the beginning "'''" has already been consumed and
|
||||
// ignored.
|
||||
func lexMultilineRawString(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
switch {
|
||||
case r == rawStringEnd:
|
||||
switch lx.next() {
|
||||
case eof:
|
||||
return lx.errorf("unexpected EOF")
|
||||
case rawStringEnd:
|
||||
if lx.accept(rawStringEnd) {
|
||||
if lx.accept(rawStringEnd) {
|
||||
lx.backup()
|
||||
@@ -559,11 +673,10 @@ func lexMultilineStringEscape(lx *lexer) stateFn {
|
||||
// Handle the special case first:
|
||||
if isNL(lx.next()) {
|
||||
return lexMultilineString
|
||||
} else {
|
||||
lx.backup()
|
||||
lx.push(lexMultilineString)
|
||||
return lexStringEscape(lx)
|
||||
}
|
||||
lx.backup()
|
||||
lx.push(lexMultilineString)
|
||||
return lexStringEscape(lx)
|
||||
}
|
||||
|
||||
func lexStringEscape(lx *lexer) stateFn {
|
||||
@@ -588,10 +701,9 @@ func lexStringEscape(lx *lexer) stateFn {
|
||||
case 'U':
|
||||
return lexLongUnicodeEscape
|
||||
}
|
||||
return lx.errorf("Invalid escape character %q. Only the following "+
|
||||
return lx.errorf("invalid escape character %q; only the following "+
|
||||
"escape characters are allowed: "+
|
||||
"\\b, \\t, \\n, \\f, \\r, \\\", \\/, \\\\, "+
|
||||
"\\uXXXX and \\UXXXXXXXX.", r)
|
||||
`\b, \t, \n, \f, \r, \", \\, \uXXXX, and \UXXXXXXXX`, r)
|
||||
}
|
||||
|
||||
func lexShortUnicodeEscape(lx *lexer) stateFn {
|
||||
@@ -599,8 +711,8 @@ func lexShortUnicodeEscape(lx *lexer) stateFn {
|
||||
for i := 0; i < 4; i++ {
|
||||
r = lx.next()
|
||||
if !isHexadecimal(r) {
|
||||
return lx.errorf("Expected four hexadecimal digits after '\\u', "+
|
||||
"but got '%s' instead.", lx.current())
|
||||
return lx.errorf(`expected four hexadecimal digits after '\u', `+
|
||||
"but got %q instead", lx.current())
|
||||
}
|
||||
}
|
||||
return lx.pop()
|
||||
@@ -611,40 +723,43 @@ func lexLongUnicodeEscape(lx *lexer) stateFn {
|
||||
for i := 0; i < 8; i++ {
|
||||
r = lx.next()
|
||||
if !isHexadecimal(r) {
|
||||
return lx.errorf("Expected eight hexadecimal digits after '\\U', "+
|
||||
"but got '%s' instead.", lx.current())
|
||||
return lx.errorf(`expected eight hexadecimal digits after '\U', `+
|
||||
"but got %q instead", lx.current())
|
||||
}
|
||||
}
|
||||
return lx.pop()
|
||||
}
|
||||
|
||||
// lexNumberOrDateStart consumes either a (positive) integer, float or
|
||||
// datetime. It assumes that NO negative sign has been consumed.
|
||||
// lexNumberOrDateStart consumes either an integer, a float, or datetime.
|
||||
func lexNumberOrDateStart(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
if !isDigit(r) {
|
||||
if r == '.' {
|
||||
return lx.errorf("Floats must start with a digit, not '.'.")
|
||||
} else {
|
||||
return lx.errorf("Expected a digit but got %q.", r)
|
||||
}
|
||||
if isDigit(r) {
|
||||
return lexNumberOrDate
|
||||
}
|
||||
return lexNumberOrDate
|
||||
switch r {
|
||||
case '_':
|
||||
return lexNumber
|
||||
case 'e', 'E':
|
||||
return lexFloat
|
||||
case '.':
|
||||
return lx.errorf("floats must start with a digit, not '.'")
|
||||
}
|
||||
return lx.errorf("expected a digit but got %q", r)
|
||||
}
|
||||
|
||||
// lexNumberOrDate consumes either a (positive) integer, float or datetime.
|
||||
// lexNumberOrDate consumes either an integer, float or datetime.
|
||||
func lexNumberOrDate(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
switch {
|
||||
case r == '-':
|
||||
if lx.pos-lx.start != 5 {
|
||||
return lx.errorf("All ISO8601 dates must be in full Zulu form.")
|
||||
}
|
||||
return lexDateAfterYear
|
||||
case isDigit(r):
|
||||
if isDigit(r) {
|
||||
return lexNumberOrDate
|
||||
case r == '.':
|
||||
return lexFloatStart
|
||||
}
|
||||
switch r {
|
||||
case '-':
|
||||
return lexDatetime
|
||||
case '_':
|
||||
return lexNumber
|
||||
case '.', 'e', 'E':
|
||||
return lexFloat
|
||||
}
|
||||
|
||||
lx.backup()
|
||||
@@ -652,46 +767,34 @@ func lexNumberOrDate(lx *lexer) stateFn {
|
||||
return lx.pop()
|
||||
}
|
||||
|
||||
// lexDateAfterYear consumes a full Zulu Datetime in ISO8601 format.
|
||||
// It assumes that "YYYY-" has already been consumed.
|
||||
func lexDateAfterYear(lx *lexer) stateFn {
|
||||
formats := []rune{
|
||||
// digits are '0'.
|
||||
// everything else is direct equality.
|
||||
'0', '0', '-', '0', '0',
|
||||
'T',
|
||||
'0', '0', ':', '0', '0', ':', '0', '0',
|
||||
'Z',
|
||||
// lexDatetime consumes a Datetime, to a first approximation.
|
||||
// The parser validates that it matches one of the accepted formats.
|
||||
func lexDatetime(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
if isDigit(r) {
|
||||
return lexDatetime
|
||||
}
|
||||
for _, f := range formats {
|
||||
r := lx.next()
|
||||
if f == '0' {
|
||||
if !isDigit(r) {
|
||||
return lx.errorf("Expected digit in ISO8601 datetime, "+
|
||||
"but found %q instead.", r)
|
||||
}
|
||||
} else if f != r {
|
||||
return lx.errorf("Expected %q in ISO8601 datetime, "+
|
||||
"but found %q instead.", f, r)
|
||||
}
|
||||
switch r {
|
||||
case '-', 'T', ':', '.', 'Z', '+':
|
||||
return lexDatetime
|
||||
}
|
||||
|
||||
lx.backup()
|
||||
lx.emit(itemDatetime)
|
||||
return lx.pop()
|
||||
}
|
||||
|
||||
// lexNumberStart consumes either an integer or a float. It assumes that
|
||||
// a negative sign has already been read, but that *no* digits have been
|
||||
// consumed. lexNumberStart will move to the appropriate integer or float
|
||||
// states.
|
||||
// lexNumberStart consumes either an integer or a float. It assumes that a sign
|
||||
// has already been read, but that *no* digits have been consumed.
|
||||
// lexNumberStart will move to the appropriate integer or float states.
|
||||
func lexNumberStart(lx *lexer) stateFn {
|
||||
// we MUST see a digit. Even floats have to start with a digit.
|
||||
// We MUST see a digit. Even floats have to start with a digit.
|
||||
r := lx.next()
|
||||
if !isDigit(r) {
|
||||
if r == '.' {
|
||||
return lx.errorf("Floats must start with a digit, not '.'.")
|
||||
} else {
|
||||
return lx.errorf("Expected a digit but got %q.", r)
|
||||
return lx.errorf("floats must start with a digit, not '.'")
|
||||
}
|
||||
return lx.errorf("expected a digit but got %q", r)
|
||||
}
|
||||
return lexNumber
|
||||
}
|
||||
@@ -699,11 +802,14 @@ func lexNumberStart(lx *lexer) stateFn {
|
||||
// lexNumber consumes an integer or a float after seeing the first digit.
|
||||
func lexNumber(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
switch {
|
||||
case isDigit(r):
|
||||
if isDigit(r) {
|
||||
return lexNumber
|
||||
case r == '.':
|
||||
return lexFloatStart
|
||||
}
|
||||
switch r {
|
||||
case '_':
|
||||
return lexNumber
|
||||
case '.', 'e', 'E':
|
||||
return lexFloat
|
||||
}
|
||||
|
||||
lx.backup()
|
||||
@@ -711,60 +817,42 @@ func lexNumber(lx *lexer) stateFn {
|
||||
return lx.pop()
|
||||
}
|
||||
|
||||
// lexFloatStart starts the consumption of digits of a float after a '.'.
|
||||
// Namely, at least one digit is required.
|
||||
func lexFloatStart(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
if !isDigit(r) {
|
||||
return lx.errorf("Floats must have a digit after the '.', but got "+
|
||||
"%q instead.", r)
|
||||
}
|
||||
return lexFloat
|
||||
}
|
||||
|
||||
// lexFloat consumes the digits of a float after a '.'.
|
||||
// Assumes that one digit has been consumed after a '.' already.
|
||||
// lexFloat consumes the elements of a float. It allows any sequence of
|
||||
// float-like characters, so floats emitted by the lexer are only a first
|
||||
// approximation and must be validated by the parser.
|
||||
func lexFloat(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
if isDigit(r) {
|
||||
return lexFloat
|
||||
}
|
||||
switch r {
|
||||
case '_', '.', '-', '+', 'e', 'E':
|
||||
return lexFloat
|
||||
}
|
||||
|
||||
lx.backup()
|
||||
lx.emit(itemFloat)
|
||||
return lx.pop()
|
||||
}
|
||||
|
||||
// lexConst consumes the s[1:] in s. It assumes that s[0] has already been
|
||||
// consumed.
|
||||
func lexConst(lx *lexer, s string) stateFn {
|
||||
for i := range s[1:] {
|
||||
if r := lx.next(); r != rune(s[i+1]) {
|
||||
return lx.errorf("Expected %q, but found %q instead.", s[:i+1],
|
||||
s[:i]+string(r))
|
||||
// lexBool consumes a bool string: 'true' or 'false.
|
||||
func lexBool(lx *lexer) stateFn {
|
||||
var rs []rune
|
||||
for {
|
||||
r := lx.next()
|
||||
if !unicode.IsLetter(r) {
|
||||
lx.backup()
|
||||
break
|
||||
}
|
||||
rs = append(rs, r)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// lexTrue consumes the "rue" in "true". It assumes that 't' has already
|
||||
// been consumed.
|
||||
func lexTrue(lx *lexer) stateFn {
|
||||
if fn := lexConst(lx, "true"); fn != nil {
|
||||
return fn
|
||||
s := string(rs)
|
||||
switch s {
|
||||
case "true", "false":
|
||||
lx.emit(itemBool)
|
||||
return lx.pop()
|
||||
}
|
||||
lx.emit(itemBool)
|
||||
return lx.pop()
|
||||
}
|
||||
|
||||
// lexFalse consumes the "alse" in "false". It assumes that 'f' has already
|
||||
// been consumed.
|
||||
func lexFalse(lx *lexer) stateFn {
|
||||
if fn := lexConst(lx, "false"); fn != nil {
|
||||
return fn
|
||||
}
|
||||
lx.emit(itemBool)
|
||||
return lx.pop()
|
||||
return lx.errorf("expected value but found %q instead", s)
|
||||
}
|
||||
|
||||
// lexCommentStart begins the lexing of a comment. It will emit
|
||||
@@ -776,7 +864,7 @@ func lexCommentStart(lx *lexer) stateFn {
|
||||
}
|
||||
|
||||
// lexComment lexes an entire comment. It assumes that '#' has been consumed.
|
||||
// It will consume *up to* the first new line character, and pass control
|
||||
// It will consume *up to* the first newline character, and pass control
|
||||
// back to the last state on the stack.
|
||||
func lexComment(lx *lexer) stateFn {
|
||||
r := lx.peek()
|
||||
@@ -834,13 +922,7 @@ func (itype itemType) String() string {
|
||||
return "EOF"
|
||||
case itemText:
|
||||
return "Text"
|
||||
case itemString:
|
||||
return "String"
|
||||
case itemRawString:
|
||||
return "String"
|
||||
case itemMultilineString:
|
||||
return "String"
|
||||
case itemRawMultilineString:
|
||||
case itemString, itemRawString, itemMultilineString, itemRawMultilineString:
|
||||
return "String"
|
||||
case itemBool:
|
||||
return "Bool"
|
||||
|
||||
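
All of the lexer hunks above edit functions of the same shape: each `lex*` function receives the lexer and returns the next state. For readers new to the pattern, here is a minimal, self-contained Go sketch of that state-function style; the tiny `lexer` type and the names in it are illustrative stand-ins written for this note, not the vendored implementation.

```go
package main

import "fmt"

// stateFn is the heart of the pattern: each state returns the next
// state, and the driver loop stops when a state returns nil.
type stateFn func(*lexer) stateFn

type lexer struct {
    input string
    pos   int
}

// lexWord consumes characters up to the first space, then stops.
func lexWord(lx *lexer) stateFn {
    for lx.pos < len(lx.input) && lx.input[lx.pos] != ' ' {
        lx.pos++
    }
    fmt.Println("word:", lx.input[:lx.pos])
    return nil // no further state: the run loop ends
}

func run(lx *lexer, start stateFn) {
    for state := start; state != nil; {
        state = state(lx)
    }
}

func main() {
    run(&lexer{input: "hello world"}, lexWord)
}
```

The benefit, visible throughout the diff, is that transitions such as `return lexValue` or `return lx.pop()` make the grammar's control flow explicit without one central switch.
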
133 vendor/github.com/BurntSushi/toml/parse.go generated vendored
@@ -2,7 +2,6 @@ package toml

import (
    "fmt"
    "log"
    "strconv"
    "strings"
    "time"
@@ -81,7 +80,7 @@ func (p *parser) next() item {
}

func (p *parser) bug(format string, v ...interface{}) {
    log.Panicf("BUG: %s\n\n", fmt.Sprintf(format, v...))
    panic(fmt.Sprintf("BUG: "+format+"\n\n", v...))
}

func (p *parser) expect(typ itemType) item {
@@ -179,10 +178,18 @@ func (p *parser) value(it item) (interface{}, tomlType) {
        }
        p.bug("Expected boolean value, but got '%s'.", it.val)
    case itemInteger:
        num, err := strconv.ParseInt(it.val, 10, 64)
        if !numUnderscoresOK(it.val) {
            p.panicf("Invalid integer %q: underscores must be surrounded by digits",
                it.val)
        }
        val := strings.Replace(it.val, "_", "", -1)
        num, err := strconv.ParseInt(val, 10, 64)
        if err != nil {
            // See comment below for floats describing why we make a
            // distinction between a bug and a user error.
            // Distinguish integer values. Normally, it'd be a bug if the lexer
            // provides an invalid integer, but it's possible that the number is
            // out of range of valid values (which the lexer cannot determine).
            // So mark the former as a bug but the latter as a legitimate user
            // error.
            if e, ok := err.(*strconv.NumError); ok &&
                e.Err == strconv.ErrRange {

@@ -194,29 +201,57 @@ func (p *parser) value(it item) (interface{}, tomlType) {
        }
        return num, p.typeOfPrimitive(it)
    case itemFloat:
        num, err := strconv.ParseFloat(it.val, 64)
        parts := strings.FieldsFunc(it.val, func(r rune) bool {
            switch r {
            case '.', 'e', 'E':
                return true
            }
            return false
        })
        for _, part := range parts {
            if !numUnderscoresOK(part) {
                p.panicf("Invalid float %q: underscores must be "+
                    "surrounded by digits", it.val)
            }
        }
        if !numPeriodsOK(it.val) {
            // As a special case, numbers like '123.' or '1.e2',
            // which are valid as far as Go/strconv are concerned,
            // must be rejected because TOML says that a fractional
            // part consists of '.' followed by 1+ digits.
            p.panicf("Invalid float %q: '.' must be followed "+
                "by one or more digits", it.val)
        }
        val := strings.Replace(it.val, "_", "", -1)
        num, err := strconv.ParseFloat(val, 64)
        if err != nil {
            // Distinguish float values. Normally, it'd be a bug if the lexer
            // provides an invalid float, but it's possible that the float is
            // out of range of valid values (which the lexer cannot determine).
            // So mark the former as a bug but the latter as a legitimate user
            // error.
            //
            // This is also true for integers.
            if e, ok := err.(*strconv.NumError); ok &&
                e.Err == strconv.ErrRange {

                p.panicf("Float '%s' is out of the range of 64-bit "+
                    "IEEE-754 floating-point numbers.", it.val)
            } else {
                p.bug("Expected float value, but got '%s'.", it.val)
                p.panicf("Invalid float value: %q", it.val)
            }
        }
        return num, p.typeOfPrimitive(it)
    case itemDatetime:
        t, err := time.Parse("2006-01-02T15:04:05Z", it.val)
        if err != nil {
            p.panicf("Invalid RFC3339 Zulu DateTime: '%s'.", it.val)
        var t time.Time
        var ok bool
        var err error
        for _, format := range []string{
            "2006-01-02T15:04:05Z07:00",
            "2006-01-02T15:04:05",
            "2006-01-02",
        } {
            t, err = time.ParseInLocation(format, it.val, time.Local)
            if err == nil {
                ok = true
                break
            }
        }
        if !ok {
            p.panicf("Invalid TOML Datetime: %q.", it.val)
        }
        return t, p.typeOfPrimitive(it)
    case itemArray:
@@ -234,11 +269,75 @@ func (p *parser) value(it item) (interface{}, tomlType) {
            types = append(types, typ)
        }
        return array, p.typeOfArray(types)
    case itemInlineTableStart:
        var (
            hash         = make(map[string]interface{})
            outerContext = p.context
            outerKey     = p.currentKey
        )

        p.context = append(p.context, p.currentKey)
        p.currentKey = ""
        for it := p.next(); it.typ != itemInlineTableEnd; it = p.next() {
            if it.typ != itemKeyStart {
                p.bug("Expected key start but instead found %q, around line %d",
                    it.val, p.approxLine)
            }
            if it.typ == itemCommentStart {
                p.expect(itemText)
                continue
            }

            // retrieve key
            k := p.next()
            p.approxLine = k.line
            kname := p.keyString(k)

            // retrieve value
            p.currentKey = kname
            val, typ := p.value(p.next())
            // make sure we keep metadata up to date
            p.setType(kname, typ)
            p.ordered = append(p.ordered, p.context.add(p.currentKey))
            hash[kname] = val
        }
        p.context = outerContext
        p.currentKey = outerKey
        return hash, tomlHash
    }
    p.bug("Unexpected value type: %s", it.typ)
    panic("unreachable")
}

// numUnderscoresOK checks whether each underscore in s is surrounded by
// characters that are not underscores.
func numUnderscoresOK(s string) bool {
    accept := false
    for _, r := range s {
        if r == '_' {
            if !accept {
                return false
            }
            accept = false
            continue
        }
        accept = true
    }
    return accept
}

// numPeriodsOK checks whether every period in s is followed by a digit.
func numPeriodsOK(s string) bool {
    period := false
    for _, r := range s {
        if period && !isDigit(r) {
            return false
        }
        period = r == '.'
    }
    return !period
}

// establishContext sets the current context of the parser,
// where the context is either a hash or an array of hashes. Which one is
// set depends on the value of the `array` parameter.
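
The two helpers just added, `numUnderscoresOK` and `numPeriodsOK`, encode TOML's rules that an underscore in a number must be surrounded by other characters and that a '.' must be followed by a digit. Below is a runnable copy of both, with a minimal `isDigit` supplied here so the file is self-contained; only the scaffolding around the two functions is new.

```go
package main

import "fmt"

// isDigit is reimplemented here so the sketch compiles on its own.
func isDigit(r rune) bool { return r >= '0' && r <= '9' }

// numUnderscoresOK: verbatim copy of the validator from the hunk above.
func numUnderscoresOK(s string) bool {
    accept := false
    for _, r := range s {
        if r == '_' {
            if !accept {
                return false
            }
            accept = false
            continue
        }
        accept = true
    }
    return accept
}

// numPeriodsOK: verbatim copy of the validator from the hunk above.
func numPeriodsOK(s string) bool {
    period := false
    for _, r := range s {
        if period && !isDigit(r) {
            return false
        }
        period = r == '.'
    }
    return !period
}

func main() {
    fmt.Println(numUnderscoresOK("1_000")) // true: underscore surrounded by digits
    fmt.Println(numUnderscoresOK("_1000")) // false: leading underscore
    fmt.Println(numPeriodsOK("3.14"))      // true: '.' followed by a digit
    fmt.Println(numPeriodsOK("123."))      // false: trailing '.'
}
```
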
9 vendor/github.com/BurntSushi/toml/type_fields.go generated vendored
@@ -95,8 +95,8 @@ func typeFields(t reflect.Type) []field {
        if sf.PkgPath != "" && !sf.Anonymous { // unexported
            continue
        }
        name, _ := getOptions(sf.Tag.Get("toml"))
        if name == "-" {
        opts := getOptions(sf.Tag)
        if opts.skip {
            continue
        }
        index := make([]int, len(f.index)+1)
@@ -110,8 +110,9 @@ func typeFields(t reflect.Type) []field {
        }

        // Record found field and index sequence.
        if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
            tagged := name != ""
        if opts.name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
            tagged := opts.name != ""
            name := opts.name
            if name == "" {
                name = sf.Name
            }
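
The hunk above swaps a `(name, _)` pair for an options value with `skip` and `name` fields. The vendored `getOptions` itself is not shown in this diff, so the sketch below only guesses at a plausible shape for it, to illustrate how a `toml:"name,option"` struct tag could split into those fields; `tagOptions` and this `getOptions` body are assumptions for illustration, not the package's actual code.

```go
package main

import (
    "fmt"
    "reflect"
    "strings"
)

// tagOptions is a hypothetical stand-in for the options value used above.
type tagOptions struct {
    skip bool // true when the tag is `toml:"-"`
    name string
}

// getOptions (hypothetical): split the `toml` tag into a name and options.
func getOptions(tag reflect.StructTag) tagOptions {
    t := tag.Get("toml")
    if t == "-" {
        return tagOptions{skip: true}
    }
    name := t
    if i := strings.Index(t, ","); i != -1 {
        name = t[:i] // drop options such as ",omitempty"
    }
    return tagOptions{name: name}
}

func main() {
    type T struct {
        A int `toml:"alpha,omitempty"`
        B int `toml:"-"`
    }
    rt := reflect.TypeOf(T{})
    for i := 0; i < rt.NumField(); i++ {
        fmt.Printf("%+v\n", getOptions(rt.Field(i).Tag))
    }
}
```
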
252 vendor/github.com/boltdb/bolt/freelist.go generated vendored
@@ -1,252 +0,0 @@
package bolt

import (
    "fmt"
    "sort"
    "unsafe"
)

// freelist represents a list of all pages that are available for allocation.
// It also tracks pages that have been freed but are still in use by open transactions.
type freelist struct {
    ids     []pgid          // all free and available free page ids.
    pending map[txid][]pgid // mapping of soon-to-be free page ids by tx.
    cache   map[pgid]bool   // fast lookup of all free and pending page ids.
}

// newFreelist returns an empty, initialized freelist.
func newFreelist() *freelist {
    return &freelist{
        pending: make(map[txid][]pgid),
        cache:   make(map[pgid]bool),
    }
}

// size returns the size of the page after serialization.
func (f *freelist) size() int {
    n := f.count()
    if n >= 0xFFFF {
        // The first element will be used to store the count. See freelist.write.
        n++
    }
    return pageHeaderSize + (int(unsafe.Sizeof(pgid(0))) * n)
}

// count returns count of pages on the freelist
func (f *freelist) count() int {
    return f.free_count() + f.pending_count()
}

// free_count returns count of free pages
func (f *freelist) free_count() int {
    return len(f.ids)
}

// pending_count returns count of pending pages
func (f *freelist) pending_count() int {
    var count int
    for _, list := range f.pending {
        count += len(list)
    }
    return count
}

// copyall copies into dst a list of all free ids and all pending ids in one sorted list.
// f.count returns the minimum length required for dst.
func (f *freelist) copyall(dst []pgid) {
    m := make(pgids, 0, f.pending_count())
    for _, list := range f.pending {
        m = append(m, list...)
    }
    sort.Sort(m)
    mergepgids(dst, f.ids, m)
}

// allocate returns the starting page id of a contiguous list of pages of a given size.
// If a contiguous block cannot be found then 0 is returned.
func (f *freelist) allocate(n int) pgid {
    if len(f.ids) == 0 {
        return 0
    }

    var initial, previd pgid
    for i, id := range f.ids {
        if id <= 1 {
            panic(fmt.Sprintf("invalid page allocation: %d", id))
        }

        // Reset initial page if this is not contiguous.
        if previd == 0 || id-previd != 1 {
            initial = id
        }

        // If we found a contiguous block then remove it and return it.
        if (id-initial)+1 == pgid(n) {
            // If we're allocating off the beginning then take the fast path
            // and just adjust the existing slice. This will use extra memory
            // temporarily but the append() in free() will realloc the slice
            // as is necessary.
            if (i + 1) == n {
                f.ids = f.ids[i+1:]
            } else {
                copy(f.ids[i-n+1:], f.ids[i+1:])
                f.ids = f.ids[:len(f.ids)-n]
            }

            // Remove from the free cache.
            for i := pgid(0); i < pgid(n); i++ {
                delete(f.cache, initial+i)
            }

            return initial
        }

        previd = id
    }
    return 0
}

// free releases a page and its overflow for a given transaction id.
// If the page is already free then a panic will occur.
func (f *freelist) free(txid txid, p *page) {
    if p.id <= 1 {
        panic(fmt.Sprintf("cannot free page 0 or 1: %d", p.id))
    }

    // Free page and all its overflow pages.
    var ids = f.pending[txid]
    for id := p.id; id <= p.id+pgid(p.overflow); id++ {
        // Verify that page is not already free.
        if f.cache[id] {
            panic(fmt.Sprintf("page %d already freed", id))
        }

        // Add to the freelist and cache.
        ids = append(ids, id)
        f.cache[id] = true
    }
    f.pending[txid] = ids
}

// release moves all page ids for a transaction id (or older) to the freelist.
func (f *freelist) release(txid txid) {
    m := make(pgids, 0)
    for tid, ids := range f.pending {
        if tid <= txid {
            // Move transaction's pending pages to the available freelist.
            // Don't remove from the cache since the page is still free.
            m = append(m, ids...)
            delete(f.pending, tid)
        }
    }
    sort.Sort(m)
    f.ids = pgids(f.ids).merge(m)
}

// rollback removes the pages from a given pending tx.
func (f *freelist) rollback(txid txid) {
    // Remove page ids from cache.
    for _, id := range f.pending[txid] {
        delete(f.cache, id)
    }

    // Remove pages from pending list.
    delete(f.pending, txid)
}

// freed returns whether a given page is in the free list.
func (f *freelist) freed(pgid pgid) bool {
    return f.cache[pgid]
}

// read initializes the freelist from a freelist page.
func (f *freelist) read(p *page) {
    // If the page.count is at the max uint16 value (64k) then it's considered
    // an overflow and the size of the freelist is stored as the first element.
    idx, count := 0, int(p.count)
    if count == 0xFFFF {
        idx = 1
        count = int(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0])
    }

    // Copy the list of page ids from the freelist.
    if count == 0 {
        f.ids = nil
    } else {
        ids := ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[idx:count]
        f.ids = make([]pgid, len(ids))
        copy(f.ids, ids)

        // Make sure they're sorted.
        sort.Sort(pgids(f.ids))
    }

    // Rebuild the page cache.
    f.reindex()
}

// write writes the page ids onto a freelist page. All free and pending ids are
// saved to disk since in the event of a program crash, all pending ids will
// become free.
func (f *freelist) write(p *page) error {
    // Combine the old free pgids and pgids waiting on an open transaction.

    // Update the header flag.
    p.flags |= freelistPageFlag

    // The page.count can only hold up to 64k elements so if we overflow that
    // number then we handle it by putting the size in the first element.
    lenids := f.count()
    if lenids == 0 {
        p.count = uint16(lenids)
    } else if lenids < 0xFFFF {
        p.count = uint16(lenids)
        f.copyall(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[:])
    } else {
        p.count = 0xFFFF
        ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0] = pgid(lenids)
        f.copyall(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[1:])
    }

    return nil
}

// reload reads the freelist from a page and filters out pending items.
func (f *freelist) reload(p *page) {
    f.read(p)

    // Build a cache of only pending pages.
    pcache := make(map[pgid]bool)
    for _, pendingIDs := range f.pending {
        for _, pendingID := range pendingIDs {
            pcache[pendingID] = true
        }
    }

    // Check each page in the freelist and build a new available freelist
    // with any pages not in the pending lists.
    var a []pgid
    for _, id := range f.ids {
        if !pcache[id] {
            a = append(a, id)
        }
    }
    f.ids = a

    // Once the available list is rebuilt then rebuild the free cache so that
    // it includes the available and pending free pages.
    f.reindex()
}

// reindex rebuilds the free cache based on available and pending free lists.
func (f *freelist) reindex() {
    f.cache = make(map[pgid]bool, len(f.ids))
    for _, id := range f.ids {
        f.cache[id] = true
    }
    for _, pendingIDs := range f.pending {
        for _, pendingID := range pendingIDs {
            f.cache[pendingID] = true
        }
    }
}
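
The deleted `freelist.allocate` above is a single pass over a sorted id list looking for `n` consecutive ids. A simplified, runnable version of that scan, using plain `int` ids instead of bolt's `pgid` and omitting the panic on reserved pages and the cache bookkeeping, looks like this:

```go
package main

import "fmt"

// allocate finds n contiguous ids in a sorted list of free ids and
// removes them, returning the first id of the run (0 if none found).
func allocate(ids []int, n int) (start int, rest []int) {
    var initial, previd int
    for i, id := range ids {
        if previd == 0 || id-previd != 1 {
            initial = id // run broken; restart it here
        }
        if id-initial+1 == n {
            // Found a run of n ids ending at index i; cut it out.
            rest = append(append(rest, ids[:i-n+1]...), ids[i+1:]...)
            return initial, rest
        }
        previd = id
    }
    return 0, ids
}

func main() {
    free := []int{3, 4, 7, 8, 9, 12}
    start, rest := allocate(free, 3)
    fmt.Println(start, rest) // 7 [3 4 12]
}
```
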
201 vendor/github.com/containers/buildah/LICENSE generated vendored Normal file
@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.

"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:

(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.

You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

APPENDIX: How to apply the Apache License to your work.

To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.

Copyright {yyyy} {name of copyright owner}

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
129 vendor/github.com/containers/buildah/README.md generated vendored Normal file
@@ -0,0 +1,129 @@
![buildah logo](https://cdn.rawgit.com/containers/buildah/master/logos/buildah-logo_large.png)

# [Buildah](https://www.youtube.com/embed/YVk5NgSiUw8) - a tool that facilitates building [Open Container Initiative (OCI)](https://www.opencontainers.org/) container images

[![Go Report Card](https://goreportcard.com/badge/github.com/containers/buildah)](https://goreportcard.com/report/github.com/containers/buildah)
[![Travis](https://travis-ci.org/containers/buildah.svg?branch=master)](https://travis-ci.org/containers/buildah)

The Buildah package provides a command line tool that can be used to
* create a working container, either from scratch or using an image as a starting point
* create an image, either from a working container or via the instructions in a Dockerfile
  * images can be built in either the OCI image format or the traditional upstream docker image format
* mount a working container's root filesystem for manipulation
* unmount a working container's root filesystem
* use the updated contents of a container's root filesystem as a filesystem layer to create a new image
* delete a working container or an image
* rename a local container

## Buildah Information for Developers

For blogs, release announcements and more, please checkout the [buildah.io](https://buildah.io) website!

**[Buildah Demos](demos)**

**[Changelog](CHANGELOG.md)**

**[Contributing](CONTRIBUTING.md)**

**[Development Plan](developmentplan.md)**

**[Installation notes](install.md)**

**[Troubleshooting Guide](troubleshooting.md)**

**[Tutorials](docs/tutorials)**

## Buildah and Podman relationship

Buildah and Podman are two complementary open-source projects that are
available on most Linux platforms and both projects reside at
[GitHub.com](https://github.com) with Buildah
[here](https://github.com/containers/buildah) and Podman
[here](https://github.com/containers/libpod). Both, Buildah and Podman are
command line tools that work on Open Container Initiative (OCI) images and
containers. The two projects differentiate in their specialization.

Buildah specializes in building OCI images. Buildah's commands replicate all
of the commands that are found in a Dockerfile. This allows building images
with and without Dockerfiles while not requiring any root privileges.
Buildah’s ultimate goal is to provide a lower-level coreutils interface to
build images. The flexibility of building images without Dockerfiles allows
for the integration of other scripting languages into the build process.
Buildah follows a simple fork-exec model and does not run as a daemon
but it is based on a comprehensive API in golang, which can be vendored
into other tools.

Podman specializes in all of the commands and functions that help you to maintain and modify
OCI images, such as pulling and tagging. It also allows you to create, run, and maintain those containers
created from those images.

A major difference between Podman and Buildah is their concept of a container. Podman
allows users to create "traditional containers" where the intent of these containers is
to be long lived. While Buildah containers are really just created to allow content
to be added back to the container image. An easy way to think of it is the
`buildah run` command emulates the RUN command in a Dockerfile while the `podman run`
command emulates the `docker run` command in functionality. Because of this and their underlying
storage differences, you can not see Podman containers from within Buildah or vice versa.

In short, Buildah is an efficient way to create OCI images while Podman allows
you to manage and maintain those images and containers in a production environment using
familiar container cli commands. For more details, see the
[Container Tools Guide](https://github.com/containers/buildah/tree/master/docs/containertools).

## Example

From [`./examples/lighttpd.sh`](examples/lighttpd.sh):

```bash
$ cat > lighttpd.sh <<"EOF"
#!/bin/bash -x

ctr1=$(buildah from "${1:-fedora}")

## Get all updates and install our minimal httpd server
buildah run "$ctr1" -- dnf update -y
buildah run "$ctr1" -- dnf install -y lighttpd

## Include some buildtime annotations
buildah config --annotation "com.example.build.host=$(uname -n)" "$ctr1"

## Run our server and expose the port
buildah config --cmd "/usr/sbin/lighttpd -D -f /etc/lighttpd/lighttpd.conf" "$ctr1"
buildah config --port 80 "$ctr1"

## Commit this container to an image name
buildah commit "$ctr1" "${2:-$USER/lighttpd}"
EOF

$ chmod +x lighttpd.sh
$ sudo ./lighttpd.sh
```

## Commands
| Command | Description |
| --- | --- |
| [buildah-add(1)](/docs/buildah-add.md) | Add the contents of a file, URL, or a directory to the container. |
| [buildah-bud(1)](/docs/buildah-bud.md) | Build an image using instructions from Dockerfiles. |
| [buildah-commit(1)](/docs/buildah-commit.md) | Create an image from a working container. |
| [buildah-config(1)](/docs/buildah-config.md) | Update image configuration settings. |
| [buildah-containers(1)](/docs/buildah-containers.md) | List the working containers and their base images. |
| [buildah-copy(1)](/docs/buildah-copy.md) | Copies the contents of a file, URL, or directory into a container's working directory. |
| [buildah-from(1)](/docs/buildah-from.md) | Creates a new working container, either from scratch or using a specified image as a starting point. |
| [buildah-images(1)](/docs/buildah-images.md) | List images in local storage. |
| [buildah-info(1)](/docs/buildah-info.md) | Display Buildah system information. |
| [buildah-inspect(1)](/docs/buildah-inspect.md) | Inspects the configuration of a container or image. |
| [buildah-mount(1)](/docs/buildah-mount.md) | Mount the working container's root filesystem. |
| [buildah-pull(1)](/docs/buildah-pull.md) | Pull an image from the specified location. |
| [buildah-push(1)](/docs/buildah-push.md) | Push an image from local storage to elsewhere. |
| [buildah-rename(1)](/docs/buildah-rename.md) | Rename a local container. |
| [buildah-rm(1)](/docs/buildah-rm.md) | Removes one or more working containers. |
| [buildah-rmi(1)](/docs/buildah-rmi.md) | Removes one or more images. |
| [buildah-run(1)](/docs/buildah-run.md) | Run a command inside of the container. |
| [buildah-tag(1)](/docs/buildah-tag.md) | Add an additional name to a local image. |
| [buildah-umount(1)](/docs/buildah-umount.md) | Unmount a working container's root file system. |
| [buildah-unshare(1)](/docs/buildah-unshare.md) | Launch a command in a user namespace with modified ID mappings. |
| [buildah-version(1)](/docs/buildah-version.md) | Display the Buildah Version Information |

**Future goals include:**
* more CI tests
* additional CLI commands (?)
276 vendor/github.com/containers/buildah/pkg/unshare/unshare.c generated vendored Normal file
@@ -0,0 +1,276 @@
#define _GNU_SOURCE
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <linux/memfd.h>
#include <fcntl.h>
#include <grp.h>
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <termios.h>
#include <errno.h>
#include <unistd.h>

#ifndef F_LINUX_SPECIFIC_BASE
#define F_LINUX_SPECIFIC_BASE 1024
#endif
#ifndef F_ADD_SEALS
#define F_ADD_SEALS (F_LINUX_SPECIFIC_BASE + 9)
#define F_GET_SEALS (F_LINUX_SPECIFIC_BASE + 10)
#endif
#ifndef F_SEAL_SEAL
#define F_SEAL_SEAL 0x0001LU
#endif
#ifndef F_SEAL_SHRINK
#define F_SEAL_SHRINK 0x0002LU
#endif
#ifndef F_SEAL_GROW
#define F_SEAL_GROW 0x0004LU
#endif
#ifndef F_SEAL_WRITE
#define F_SEAL_WRITE 0x0008LU
#endif

#define BUFSTEP 1024

static const char *_max_user_namespaces = "/proc/sys/user/max_user_namespaces";
static const char *_unprivileged_user_namespaces = "/proc/sys/kernel/unprivileged_userns_clone";

static int _containers_unshare_parse_envint(const char *envname) {
	char *p, *q;
	long l;

	p = getenv(envname);
	if (p == NULL) {
		return -1;
	}
	q = NULL;
	l = strtol(p, &q, 10);
	if ((q == NULL) || (*q != '\0')) {
		fprintf(stderr, "Error parsing \"%s\"=\"%s\"!\n", envname, p);
		_exit(1);
	}
	unsetenv(envname);
	return l;
}

static void _check_proc_sys_file(const char *path)
{
	FILE *fp;
	char buf[32];
	size_t n_read;
	long r;

	fp = fopen(path, "r");
	if (fp == NULL) {
		if (errno != ENOENT)
			fprintf(stderr, "Error reading %s: %m\n", _max_user_namespaces);
	} else {
		memset(buf, 0, sizeof(buf));
		n_read = fread(buf, 1, sizeof(buf) - 1, fp);
		if (n_read > 0) {
			r = atoi(buf);
			if (r == 0) {
				fprintf(stderr, "User namespaces are not enabled in %s.\n", path);
			}
		} else {
			fprintf(stderr, "Error reading %s: no contents, should contain a number greater than 0.\n", path);
		}
		fclose(fp);
	}
}

static char **parse_proc_stringlist(const char *list) {
	int fd, n, i, n_strings;
	char *buf, *new_buf, **ret;
	size_t size, new_size, used;

	fd = open(list, O_RDONLY);
	if (fd == -1) {
		return NULL;
	}
	buf = NULL;
	size = 0;
	used = 0;
	for (;;) {
		new_size = used + BUFSTEP;
		new_buf = realloc(buf, new_size);
		if (new_buf == NULL) {
			free(buf);
			fprintf(stderr, "realloc(%ld): out of memory\n", (long)(size + BUFSTEP));
			return NULL;
		}
		buf = new_buf;
		size = new_size;
		memset(buf + used, '\0', size - used);
		n = read(fd, buf + used, size - used - 1);
		if (n < 0) {
			fprintf(stderr, "read(): %m\n");
			return NULL;
		}
		if (n == 0) {
			break;
		}
		used += n;
	}
	close(fd);
	n_strings = 0;
	for (n = 0; n < used; n++) {
		if ((n == 0) || (buf[n-1] == '\0')) {
			n_strings++;
		}
	}
	ret = calloc(n_strings + 1, sizeof(char *));
	if (ret == NULL) {
		fprintf(stderr, "calloc(): out of memory\n");
		return NULL;
	}
	i = 0;
	for (n = 0; n < used; n++) {
		if ((n == 0) || (buf[n-1] == '\0')) {
			ret[i++] = &buf[n];
		}
	}
	ret[i] = NULL;
	return ret;
}

static int containers_reexec(void) {
	char **argv, *exename;
	int fd, mmfd, n_read, n_written;
	struct stat st;
	char buf[2048];

	argv = parse_proc_stringlist("/proc/self/cmdline");
	if (argv == NULL) {
		return -1;
	}
	fd = open("/proc/self/exe", O_RDONLY | O_CLOEXEC);
	if (fd == -1) {
		fprintf(stderr, "open(\"/proc/self/exe\"): %m\n");
		return -1;
	}
	if (fstat(fd, &st) == -1) {
		fprintf(stderr, "fstat(\"/proc/self/exe\"): %m\n");
		return -1;
	}
	exename = basename(argv[0]);
	mmfd = syscall(SYS_memfd_create, exename, (long) MFD_ALLOW_SEALING | MFD_CLOEXEC);
	if (mmfd == -1) {
		fprintf(stderr, "memfd_create(): %m\n");
		return -1;
	}
	for (;;) {
		n_read = read(fd, buf, sizeof(buf));
		if (n_read < 0) {
			fprintf(stderr, "read(\"/proc/self/exe\"): %m\n");
			return -1;
		}
		if (n_read == 0) {
			break;
		}
		n_written = write(mmfd, buf, n_read);
		if (n_written < 0) {
			fprintf(stderr, "write(anonfd): %m\n");
			return -1;
		}
		if (n_written != n_read) {
			fprintf(stderr, "write(anonfd): short write (%d != %d)\n", n_written, n_read);
			return -1;
		}
	}
	close(fd);
	if (fcntl(mmfd, F_ADD_SEALS, F_SEAL_SHRINK | F_SEAL_GROW | F_SEAL_WRITE | F_SEAL_SEAL) == -1) {
		close(mmfd);
		fprintf(stderr, "Error sealing memfd copy: %m\n");
		return -1;
	}
	if (fexecve(mmfd, argv, environ) == -1) {
		close(mmfd);
		fprintf(stderr, "Error during reexec(...): %m\n");
		return -1;
	}
	return 0;
}

void _containers_unshare(void)
{
	int flags, pidfd, continuefd, n, pgrp, sid, ctty;
	char buf[2048];

	flags = _containers_unshare_parse_envint("_Containers-unshare");
	if (flags == -1) {
		return;
	}
	if ((flags & CLONE_NEWUSER) != 0) {
		if (unshare(CLONE_NEWUSER) == -1) {
			fprintf(stderr, "Error during unshare(CLONE_NEWUSER): %m\n");
			_check_proc_sys_file (_max_user_namespaces);
			_check_proc_sys_file (_unprivileged_user_namespaces);
			_exit(1);
		}
	}
	pidfd = _containers_unshare_parse_envint("_Containers-pid-pipe");
	if (pidfd != -1) {
		snprintf(buf, sizeof(buf), "%llu", (unsigned long long) getpid());
		size_t size = write(pidfd, buf, strlen(buf));
		if (size != strlen(buf)) {
			fprintf(stderr, "Error writing PID to pipe on fd %d: %m\n", pidfd);
			_exit(1);
		}
		close(pidfd);
	}
	continuefd = _containers_unshare_parse_envint("_Containers-continue-pipe");
	if (continuefd != -1) {
		n = read(continuefd, buf, sizeof(buf));
		if (n > 0) {
			fprintf(stderr, "Error: %.*s\n", n, buf);
			_exit(1);
		}
		close(continuefd);
	}
	sid = _containers_unshare_parse_envint("_Containers-setsid");
	if (sid == 1) {
		if (setsid() == -1) {
			fprintf(stderr, "Error during setsid: %m\n");
			_exit(1);
		}
	}
	pgrp = _containers_unshare_parse_envint("_Containers-setpgrp");
	if (pgrp == 1) {
		if (setpgrp() == -1) {
			fprintf(stderr, "Error during setpgrp: %m\n");
			_exit(1);
		}
	}
	ctty = _containers_unshare_parse_envint("_Containers-ctty");
	if (ctty != -1) {
		if (ioctl(ctty, TIOCSCTTY, 0) == -1) {
			fprintf(stderr, "Error while setting controlling terminal to %d: %m\n", ctty);
			_exit(1);
		}
	}
	if ((flags & CLONE_NEWUSER) != 0) {
		if (setresgid(0, 0, 0) != 0) {
			fprintf(stderr, "Error during setresgid(0): %m\n");
			_exit(1);
		}
		if (setresuid(0, 0, 0) != 0) {
			fprintf(stderr, "Error during setresuid(0): %m\n");
			_exit(1);
		}
	}
	if ((flags & ~CLONE_NEWUSER) != 0) {
		if (unshare(flags & ~CLONE_NEWUSER) == -1) {
			fprintf(stderr, "Error during unshare(...): %m\n");
			_exit(1);
		}
	}
	if (containers_reexec() != 0) {
		_exit(1);
	}
	return;
}
545 vendor/github.com/containers/buildah/pkg/unshare/unshare.go generated vendored Normal file
@@ -0,0 +1,545 @@
// +build linux

package unshare

import (
	"bufio"
	"bytes"
	"fmt"
	"io"
	"os"
	"os/exec"
	"os/user"
	"runtime"
	"strconv"
	"strings"
	"sync"
	"syscall"

	"github.com/containers/storage/pkg/idtools"
	"github.com/containers/storage/pkg/reexec"
	"github.com/opencontainers/runtime-spec/specs-go"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
	"github.com/syndtr/gocapability/capability"
)

// Cmd wraps an exec.Cmd created by the reexec package in unshare(), and
// handles setting ID maps and other related settings by triggering
// initialization code in the child.
type Cmd struct {
	*exec.Cmd
	UnshareFlags               int
	UseNewuidmap               bool
	UidMappings                []specs.LinuxIDMapping
	UseNewgidmap               bool
	GidMappings                []specs.LinuxIDMapping
	GidMappingsEnableSetgroups bool
	Setsid                     bool
	Setpgrp                    bool
	Ctty                       *os.File
	OOMScoreAdj                *int
	Hook                       func(pid int) error
}

// Command creates a new Cmd which can be customized.
func Command(args ...string) *Cmd {
	cmd := reexec.Command(args...)
	return &Cmd{
		Cmd: cmd,
	}
}

func (c *Cmd) Start() error {
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	// Set an environment variable to tell the child to synchronize its startup.
	if c.Env == nil {
		c.Env = os.Environ()
	}
	c.Env = append(c.Env, fmt.Sprintf("_Containers-unshare=%d", c.UnshareFlags))

	// Please the libpod "rootless" package to find the expected env variables.
	if os.Geteuid() != 0 {
		c.Env = append(c.Env, "_CONTAINERS_USERNS_CONFIGURED=done")
		c.Env = append(c.Env, fmt.Sprintf("_CONTAINERS_ROOTLESS_UID=%d", os.Geteuid()))
	}

	// Create the pipe for reading the child's PID.
	pidRead, pidWrite, err := os.Pipe()
	if err != nil {
		return errors.Wrapf(err, "error creating pid pipe")
	}
	c.Env = append(c.Env, fmt.Sprintf("_Containers-pid-pipe=%d", len(c.ExtraFiles)+3))
	c.ExtraFiles = append(c.ExtraFiles, pidWrite)

	// Create the pipe for letting the child know to proceed.
	continueRead, continueWrite, err := os.Pipe()
	if err != nil {
		pidRead.Close()
		pidWrite.Close()
		return errors.Wrapf(err, "error creating pid pipe")
	}
	c.Env = append(c.Env, fmt.Sprintf("_Containers-continue-pipe=%d", len(c.ExtraFiles)+3))
	c.ExtraFiles = append(c.ExtraFiles, continueRead)

	// Pass along other instructions.
	if c.Setsid {
		c.Env = append(c.Env, "_Containers-setsid=1")
	}
	if c.Setpgrp {
		c.Env = append(c.Env, "_Containers-setpgrp=1")
	}
	if c.Ctty != nil {
		c.Env = append(c.Env, fmt.Sprintf("_Containers-ctty=%d", len(c.ExtraFiles)+3))
		c.ExtraFiles = append(c.ExtraFiles, c.Ctty)
	}

	// Make sure we clean up our pipes.
	defer func() {
		if pidRead != nil {
			pidRead.Close()
		}
		if pidWrite != nil {
			pidWrite.Close()
		}
		if continueRead != nil {
			continueRead.Close()
		}
		if continueWrite != nil {
			continueWrite.Close()
		}
	}()

	// Start the new process.
	err = c.Cmd.Start()
	if err != nil {
		return err
	}

	// Close the ends of the pipes that the parent doesn't need.
	continueRead.Close()
	continueRead = nil
	pidWrite.Close()
	pidWrite = nil

	// Read the child's PID from the pipe.
	pidString := ""
	b := new(bytes.Buffer)
	io.Copy(b, pidRead)
	pidString = b.String()
	pid, err := strconv.Atoi(pidString)
	if err != nil {
		fmt.Fprintf(continueWrite, "error parsing PID %q: %v", pidString, err)
		return errors.Wrapf(err, "error parsing PID %q", pidString)
	}
	pidString = fmt.Sprintf("%d", pid)

	// If we created a new user namespace, set any specified mappings.
	if c.UnshareFlags&syscall.CLONE_NEWUSER != 0 {
		// Always set "setgroups".
		setgroups, err := os.OpenFile(fmt.Sprintf("/proc/%s/setgroups", pidString), os.O_TRUNC|os.O_WRONLY, 0)
		if err != nil {
			fmt.Fprintf(continueWrite, "error opening setgroups: %v", err)
			return errors.Wrapf(err, "error opening /proc/%s/setgroups", pidString)
		}
		defer setgroups.Close()
		if c.GidMappingsEnableSetgroups {
			if _, err := fmt.Fprintf(setgroups, "allow"); err != nil {
				fmt.Fprintf(continueWrite, "error writing \"allow\" to setgroups: %v", err)
				return errors.Wrapf(err, "error opening \"allow\" to /proc/%s/setgroups", pidString)
			}
		} else {
			if _, err := fmt.Fprintf(setgroups, "deny"); err != nil {
				fmt.Fprintf(continueWrite, "error writing \"deny\" to setgroups: %v", err)
				return errors.Wrapf(err, "error writing \"deny\" to /proc/%s/setgroups", pidString)
			}
		}

		if len(c.UidMappings) == 0 || len(c.GidMappings) == 0 {
			uidmap, gidmap, err := GetHostIDMappings("")
			if err != nil {
				fmt.Fprintf(continueWrite, "error reading ID mappings in parent: %v", err)
				return errors.Wrapf(err, "error reading ID mappings in parent")
			}
			if len(c.UidMappings) == 0 {
				c.UidMappings = uidmap
				for i := range c.UidMappings {
					c.UidMappings[i].HostID = c.UidMappings[i].ContainerID
				}
			}
			if len(c.GidMappings) == 0 {
				c.GidMappings = gidmap
				for i := range c.GidMappings {
					c.GidMappings[i].HostID = c.GidMappings[i].ContainerID
				}
			}
		}

		if len(c.GidMappings) > 0 {
			// Build the GID map, since writing to the proc file has to be done all at once.
			g := new(bytes.Buffer)
			for _, m := range c.GidMappings {
				fmt.Fprintf(g, "%d %d %d\n", m.ContainerID, m.HostID, m.Size)
			}
			// Set the GID map.
			if c.UseNewgidmap {
				cmd := exec.Command("newgidmap", append([]string{pidString}, strings.Fields(strings.Replace(g.String(), "\n", " ", -1))...)...)
				g.Reset()
				cmd.Stdout = g
				cmd.Stderr = g
				err := cmd.Run()
				if err != nil {
					fmt.Fprintf(continueWrite, "error running newgidmap: %v: %s", err, g.String())
					return errors.Wrapf(err, "error running newgidmap: %s", g.String())
				}
			} else {
				gidmap, err := os.OpenFile(fmt.Sprintf("/proc/%s/gid_map", pidString), os.O_TRUNC|os.O_WRONLY, 0)
				if err != nil {
					fmt.Fprintf(continueWrite, "error opening /proc/%s/gid_map: %v", pidString, err)
					return errors.Wrapf(err, "error opening /proc/%s/gid_map", pidString)
				}
				defer gidmap.Close()
				if _, err := fmt.Fprintf(gidmap, "%s", g.String()); err != nil {
					fmt.Fprintf(continueWrite, "error writing %q to /proc/%s/gid_map: %v", g.String(), pidString, err)
					return errors.Wrapf(err, "error writing %q to /proc/%s/gid_map", g.String(), pidString)
				}
			}
		}

		if len(c.UidMappings) > 0 {
			// Build the UID map, since writing to the proc file has to be done all at once.
			u := new(bytes.Buffer)
			for _, m := range c.UidMappings {
				fmt.Fprintf(u, "%d %d %d\n", m.ContainerID, m.HostID, m.Size)
			}
			// Set the UID map.
			if c.UseNewuidmap {
				cmd := exec.Command("newuidmap", append([]string{pidString}, strings.Fields(strings.Replace(u.String(), "\n", " ", -1))...)...)
				u.Reset()
				cmd.Stdout = u
				cmd.Stderr = u
				err := cmd.Run()
				if err != nil {
					fmt.Fprintf(continueWrite, "error running newuidmap: %v: %s", err, u.String())
					return errors.Wrapf(err, "error running newuidmap: %s", u.String())
				}
			} else {
				uidmap, err := os.OpenFile(fmt.Sprintf("/proc/%s/uid_map", pidString), os.O_TRUNC|os.O_WRONLY, 0)
				if err != nil {
					fmt.Fprintf(continueWrite, "error opening /proc/%s/uid_map: %v", pidString, err)
					return errors.Wrapf(err, "error opening /proc/%s/uid_map", pidString)
				}
				defer uidmap.Close()
				if _, err := fmt.Fprintf(uidmap, "%s", u.String()); err != nil {
					fmt.Fprintf(continueWrite, "error writing %q to /proc/%s/uid_map: %v", u.String(), pidString, err)
					return errors.Wrapf(err, "error writing %q to /proc/%s/uid_map", u.String(), pidString)
				}
			}
		}
	}

	if c.OOMScoreAdj != nil {
		oomScoreAdj, err := os.OpenFile(fmt.Sprintf("/proc/%s/oom_score_adj", pidString), os.O_TRUNC|os.O_WRONLY, 0)
		if err != nil {
			fmt.Fprintf(continueWrite, "error opening oom_score_adj: %v", err)
			return errors.Wrapf(err, "error opening /proc/%s/oom_score_adj", pidString)
		}
		defer oomScoreAdj.Close()
		if _, err := fmt.Fprintf(oomScoreAdj, "%d\n", *c.OOMScoreAdj); err != nil {
			fmt.Fprintf(continueWrite, "error writing \"%d\" to oom_score_adj: %v", c.OOMScoreAdj, err)
			return errors.Wrapf(err, "error writing \"%d\" to /proc/%s/oom_score_adj", c.OOMScoreAdj, pidString)
		}
	}
	// Run any additional setup that we want to do before the child starts running proper.
	if c.Hook != nil {
		if err = c.Hook(pid); err != nil {
			fmt.Fprintf(continueWrite, "hook error: %v", err)
			return err
		}
	}

	return nil
}

func (c *Cmd) Run() error {
	if err := c.Start(); err != nil {
		return err
	}
	return c.Wait()
}

func (c *Cmd) CombinedOutput() ([]byte, error) {
	return nil, errors.New("unshare: CombinedOutput() not implemented")
}

func (c *Cmd) Output() ([]byte, error) {
	return nil, errors.New("unshare: Output() not implemented")
}

var (
	isRootlessOnce sync.Once
	isRootless     bool
)

const (
	// UsernsEnvName is the environment variable, if set indicates in rootless mode
	UsernsEnvName = "_CONTAINERS_USERNS_CONFIGURED"
)

// IsRootless tells us if we are running in rootless mode
func IsRootless() bool {
	isRootlessOnce.Do(func() {
		isRootless = os.Geteuid() != 0 || os.Getenv(UsernsEnvName) != ""
	})
	return isRootless
}

// GetRootlessUID returns the UID of the user in the parent userNS
func GetRootlessUID() int {
	uidEnv := os.Getenv("_CONTAINERS_ROOTLESS_UID")
	if uidEnv != "" {
		u, _ := strconv.Atoi(uidEnv)
		return u
	}
	return os.Getuid()
}

// RootlessEnv returns the environment settings for the rootless containers
func RootlessEnv() []string {
	return append(os.Environ(), UsernsEnvName+"=done")
}

type Runnable interface {
	Run() error
}

func bailOnError(err error, format string, a ...interface{}) {
	if err != nil {
		if format != "" {
			logrus.Errorf("%s: %v", fmt.Sprintf(format, a...), err)
		} else {
			logrus.Errorf("%v", err)
		}
		os.Exit(1)
	}
}

// MaybeReexecUsingUserNamespace re-exec the process in a new namespace
func MaybeReexecUsingUserNamespace(evenForRoot bool) {
	// If we've already been through this once, no need to try again.
	if os.Geteuid() == 0 && IsRootless() {
		return
	}

	var uidNum, gidNum uint64
	// Figure out who we are.
	me, err := user.Current()
	if !os.IsNotExist(err) {
		bailOnError(err, "error determining current user")
		uidNum, err = strconv.ParseUint(me.Uid, 10, 32)
		bailOnError(err, "error parsing current UID %s", me.Uid)
		gidNum, err = strconv.ParseUint(me.Gid, 10, 32)
		bailOnError(err, "error parsing current GID %s", me.Gid)
	}

	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	// ID mappings to use to reexec ourselves.
	var uidmap, gidmap []specs.LinuxIDMapping
	if uidNum != 0 || evenForRoot {
		// Read the set of ID mappings that we're allowed to use. Each
		// range in /etc/subuid and /etc/subgid file is a starting host
		// ID and a range size.
		uidmap, gidmap, err = GetSubIDMappings(me.Username, me.Username)
		bailOnError(err, "error reading allowed ID mappings")
		if len(uidmap) == 0 {
			logrus.Warnf("Found no UID ranges set aside for user %q in /etc/subuid.", me.Username)
		}
		if len(gidmap) == 0 {
			logrus.Warnf("Found no GID ranges set aside for user %q in /etc/subgid.", me.Username)
		}
		// Map our UID and GID, then the subuid and subgid ranges,
		// consecutively, starting at 0, to get the mappings to use for
		// a copy of ourselves.
		uidmap = append([]specs.LinuxIDMapping{{HostID: uint32(uidNum), ContainerID: 0, Size: 1}}, uidmap...)
		gidmap = append([]specs.LinuxIDMapping{{HostID: uint32(gidNum), ContainerID: 0, Size: 1}}, gidmap...)
		var rangeStart uint32
		for i := range uidmap {
			uidmap[i].ContainerID = rangeStart
			rangeStart += uidmap[i].Size
		}
		rangeStart = 0
		for i := range gidmap {
			gidmap[i].ContainerID = rangeStart
			rangeStart += gidmap[i].Size
		}
	} else {
		// If we have CAP_SYS_ADMIN, then we don't need to create a new namespace in order to be able
		// to use unshare(), so don't bother creating a new user namespace at this point.
		capabilities, err := capability.NewPid(0)
		bailOnError(err, "error reading the current capabilities sets")
		if capabilities.Get(capability.EFFECTIVE, capability.CAP_SYS_ADMIN) {
			return
		}
		// Read the set of ID mappings that we're currently using.
		uidmap, gidmap, err = GetHostIDMappings("")
		bailOnError(err, "error reading current ID mappings")
		// Just reuse them.
		for i := range uidmap {
			uidmap[i].HostID = uidmap[i].ContainerID
		}
		for i := range gidmap {
			gidmap[i].HostID = gidmap[i].ContainerID
		}
	}

	// Unlike most uses of reexec or unshare, we're using a name that
	// _won't_ be recognized as a registered reexec handler, since we
	// _want_ to fall through reexec.Init() to the normal main().
	cmd := Command(append([]string{fmt.Sprintf("%s-in-a-user-namespace", os.Args[0])}, os.Args[1:]...)...)

	// If, somehow, we don't become UID 0 in our child, indicate that the child shouldn't try again.
	err = os.Setenv(UsernsEnvName, "1")
	bailOnError(err, "error setting %s=1 in environment", UsernsEnvName)

	// Set the default isolation type to use the "rootless" method.
	if _, present := os.LookupEnv("BUILDAH_ISOLATION"); !present {
		if err = os.Setenv("BUILDAH_ISOLATION", "rootless"); err != nil {
			logrus.Errorf("error setting BUILDAH_ISOLATION=rootless in environment: %v", err)
			os.Exit(1)
		}
	}

	// Reuse our stdio.
	cmd.Stdin = os.Stdin
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr

	// Set up a new user namespace with the ID mapping.
	cmd.UnshareFlags = syscall.CLONE_NEWUSER | syscall.CLONE_NEWNS
	cmd.UseNewuidmap = uidNum != 0
	cmd.UidMappings = uidmap
	cmd.UseNewgidmap = uidNum != 0
	cmd.GidMappings = gidmap
	cmd.GidMappingsEnableSetgroups = true

	// Finish up.
	logrus.Debugf("running %+v with environment %+v, UID map %+v, and GID map %+v", cmd.Cmd.Args, os.Environ(), cmd.UidMappings, cmd.GidMappings)
	ExecRunnable(cmd)
}

// ExecRunnable runs the specified unshare command, captures its exit status,
// and exits with the same status.
func ExecRunnable(cmd Runnable) {
	if err := cmd.Run(); err != nil {
		if exitError, ok := errors.Cause(err).(*exec.ExitError); ok {
			if exitError.ProcessState.Exited() {
				if waitStatus, ok := exitError.ProcessState.Sys().(syscall.WaitStatus); ok {
					if waitStatus.Exited() {
						logrus.Errorf("%v", exitError)
						os.Exit(waitStatus.ExitStatus())
					}
					if waitStatus.Signaled() {
						logrus.Errorf("%v", exitError)
						os.Exit(int(waitStatus.Signal()) + 128)
					}
				}
			}
		}
		logrus.Errorf("%v", err)
		logrus.Errorf("(unable to determine exit status)")
		os.Exit(1)
	}
	os.Exit(0)
}

// getHostIDMappings reads mappings from the named node under /proc.
func getHostIDMappings(path string) ([]specs.LinuxIDMapping, error) {
	var mappings []specs.LinuxIDMapping
	f, err := os.Open(path)
	if err != nil {
		return nil, errors.Wrapf(err, "error reading ID mappings from %q", path)
	}
	defer f.Close()
	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		line := scanner.Text()
		fields := strings.Fields(line)
		if len(fields) != 3 {
			return nil, errors.Errorf("line %q from %q has %d fields, not 3", line, path, len(fields))
		}
		cid, err := strconv.ParseUint(fields[0], 10, 32)
		if err != nil {
			return nil, errors.Wrapf(err, "error parsing container ID value %q from line %q in %q", fields[0], line, path)
		}
		hid, err := strconv.ParseUint(fields[1], 10, 32)
		if err != nil {
			return nil, errors.Wrapf(err, "error parsing host ID value %q from line %q in %q", fields[1], line, path)
		}
		size, err := strconv.ParseUint(fields[2], 10, 32)
		if err != nil {
			return nil, errors.Wrapf(err, "error parsing size value %q from line %q in %q", fields[2], line, path)
		}
		mappings = append(mappings, specs.LinuxIDMapping{ContainerID: uint32(cid), HostID: uint32(hid), Size: uint32(size)})
	}
	return mappings, nil
}

// GetHostIDMappings reads mappings for the specified process (or the current
// process if pid is "self" or an empty string) from the kernel.
func GetHostIDMappings(pid string) ([]specs.LinuxIDMapping, []specs.LinuxIDMapping, error) {
	if pid == "" {
		pid = "self"
	}
	uidmap, err := getHostIDMappings(fmt.Sprintf("/proc/%s/uid_map", pid))
	if err != nil {
		return nil, nil, err
	}
	gidmap, err := getHostIDMappings(fmt.Sprintf("/proc/%s/gid_map", pid))
	if err != nil {
		return nil, nil, err
	}
	return uidmap, gidmap, nil
}

// GetSubIDMappings reads mappings from /etc/subuid and /etc/subgid.
func GetSubIDMappings(user, group string) ([]specs.LinuxIDMapping, []specs.LinuxIDMapping, error) {
	mappings, err := idtools.NewIDMappings(user, group)
	if err != nil {
		return nil, nil, errors.Wrapf(err, "error reading subuid mappings for user %q and subgid mappings for group %q", user, group)
	}
	var uidmap, gidmap []specs.LinuxIDMapping
	for _, m := range mappings.UIDs() {
		uidmap = append(uidmap, specs.LinuxIDMapping{
			ContainerID: uint32(m.ContainerID),
			HostID:      uint32(m.HostID),
			Size:        uint32(m.Size),
		})
	}
	for _, m := range mappings.GIDs() {
		gidmap = append(gidmap, specs.LinuxIDMapping{
			ContainerID: uint32(m.ContainerID),
			HostID:      uint32(m.HostID),
			Size:        uint32(m.Size),
		})
	}
	return uidmap, gidmap, nil
}

// ParseIDMappings parses mapping triples.
func ParseIDMappings(uidmap, gidmap []string) ([]idtools.IDMap, []idtools.IDMap, error) {
	uid, err := idtools.ParseIDMap(uidmap, "userns-uid-map")
	if err != nil {
		return nil, nil, err
	}
	gid, err := idtools.ParseIDMap(gidmap, "userns-gid-map")
	if err != nil {
		return nil, nil, err
	}
	return uid, gid, nil
}
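Taken together, unshare.c and unshare.go implement a handshake: the Go parent spawns a re-exec'd child, the C constructor in the child parses the `_Containers-*` environment variables and unshares the requested namespaces, and the parent writes the child's uid_map/gid_map before letting it proceed. A minimal sketch of how a caller drives the package (the import of the vendored path in a standalone `main` is an assumption for illustration; this is not code from the diff):

```go
package main

import (
	"fmt"

	"github.com/containers/buildah/pkg/unshare"
)

func main() {
	// For a process started by an ordinary user, this re-execs the binary
	// inside a fresh user namespace with the caller's subuid/subgid ranges
	// mapped in; the re-exec'd child sees Geteuid() == 0 plus the marker
	// environment variable and falls straight through this call.
	unshare.MaybeReexecUsingUserNamespace(false)

	// From here on, rootless invocations run as (namespaced) UID 0.
	fmt.Printf("rootless: %v, UID in the parent namespace: %d\n",
		unshare.IsRootless(), unshare.GetRootlessUID())
}
```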
10 vendor/github.com/containers/buildah/pkg/unshare/unshare_cgo.go generated vendored Normal file
@@ -0,0 +1,10 @@
// +build linux,cgo,!gccgo

package unshare

// #cgo CFLAGS: -Wall
// extern void _containers_unshare(void);
// void __attribute__((constructor)) init(void) {
//   _containers_unshare();
// }
import "C"
25 vendor/github.com/containers/buildah/pkg/unshare/unshare_gccgo.go generated vendored Normal file
@@ -0,0 +1,25 @@
// +build linux,cgo,gccgo

package unshare

// #cgo CFLAGS: -Wall -Wextra
// extern void _containers_unshare(void);
// void __attribute__((constructor)) init(void) {
//   _containers_unshare();
// }
import "C"

// This next bit is straight out of libcontainer.

// AlwaysFalse is here to stay false
// (and be exported so the compiler doesn't optimize out its reference)
var AlwaysFalse bool

func init() {
	if AlwaysFalse {
		// by referencing this C init() in a noop test, it will ensure the compiler
		// links in the C function.
		// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=65134
		C.init()
	}
}
45 vendor/github.com/containers/buildah/pkg/unshare/unshare_unsupported.go generated vendored Normal file
@@ -0,0 +1,45 @@
// +build !linux

package unshare

import (
	"os"

	"github.com/containers/storage/pkg/idtools"
	"github.com/opencontainers/runtime-spec/specs-go"
)

const (
	// UsernsEnvName is the environment variable, if set indicates in rootless mode
	UsernsEnvName = "_CONTAINERS_USERNS_CONFIGURED"
)

// IsRootless tells us if we are running in rootless mode
func IsRootless() bool {
	return false
}

// GetRootlessUID returns the UID of the user in the parent userNS
func GetRootlessUID() int {
	return os.Getuid()
}

// RootlessEnv returns the environment settings for the rootless containers
func RootlessEnv() []string {
	return append(os.Environ(), UsernsEnvName+"=")
}

// MaybeReexecUsingUserNamespace re-exec the process in a new namespace
func MaybeReexecUsingUserNamespace(evenForRoot bool) {
}

// GetHostIDMappings reads mappings for the specified process (or the current
// process if pid is "self" or an empty string) from the kernel.
func GetHostIDMappings(pid string) ([]specs.LinuxIDMapping, []specs.LinuxIDMapping, error) {
	return nil, nil, nil
}

// ParseIDMappings parses mapping triples.
func ParseIDMappings(uidmap, gidmap []string) ([]idtools.IDMap, []idtools.IDMap, error) {
	return nil, nil, nil
}
68 vendor/github.com/containers/buildah/vendor.conf generated vendored Normal file
@@ -0,0 +1,68 @@
github.com/Azure/go-ansiterm d6e3b3328b783f23731bc4d058875b0371ff8109
github.com/blang/semver v3.5.0
github.com/BurntSushi/toml v0.2.0
github.com/containerd/continuity 004b46473808b3e7a4a3049c20e4376c91eb966d
github.com/containernetworking/cni v0.7.0-rc2
github.com/containers/image f52cf78ebfa1916da406f8b6210d8f7764ec1185
github.com/vbauerster/mpb v3.3.4
github.com/mattn/go-isatty v0.0.4
github.com/VividCortex/ewma v1.1.1
github.com/boltdb/bolt v1.3.1
github.com/containers/storage v1.12.2
github.com/docker/distribution 5f6282db7d65e6d72ad7c2cc66310724a57be716
github.com/docker/docker 54dddadc7d5d89fe0be88f76979f6f6ab0dede83
github.com/docker/docker-credential-helpers v0.6.1
github.com/docker/go-connections v0.4.0
github.com/docker/go-units v0.3.2
github.com/docker/libtrust aabc10ec26b754e797f9028f4589c5b7bd90dc20
github.com/docker/libnetwork 1a06131fb8a047d919f7deaf02a4c414d7884b83
github.com/fsouza/go-dockerclient v1.3.0
github.com/ghodss/yaml v1.0.0
github.com/gogo/protobuf v1.2.0
github.com/gorilla/context v1.1.1
github.com/gorilla/mux v1.6.2
github.com/hashicorp/errwrap v1.0.0
github.com/hashicorp/go-multierror v1.0.0
github.com/imdario/mergo v0.3.6
github.com/mattn/go-shellwords v1.0.3
github.com/Microsoft/go-winio v0.4.11
github.com/Microsoft/hcsshim v0.8.3
github.com/mistifyio/go-zfs v2.1.1
github.com/moby/moby f8806b18b4b92c5e1980f6e11c917fad201cd73c
github.com/mtrmac/gpgme b2432428689ca58c2b8e8dea9449d3295cf96fc9
# TODO: Gotty has not been updated since 2012. Can we find a replacement?
github.com/Nvveen/Gotty cd527374f1e5bff4938207604a14f2e38a9cf512
github.com/opencontainers/go-digest c9281466c8b2f606084ac71339773efd177436e7
github.com/opencontainers/image-spec v1.0.0
github.com/opencontainers/runc v1.0.0-rc6
github.com/opencontainers/runtime-spec v1.0.0
github.com/opencontainers/runtime-tools v0.8.0
github.com/opencontainers/selinux v1.1
github.com/openshift/imagebuilder v1.1.0
github.com/ostreedev/ostree-go 9ab99253d365aac3a330d1f7281cf29f3d22820b
github.com/pkg/errors v0.8.1
github.com/pquerna/ffjson d49c2bc1aa135aad0c6f4fc2056623ec78f5d5ac
github.com/seccomp/libseccomp-golang v0.9.0
github.com/seccomp/containers-golang v0.1
github.com/sirupsen/logrus v1.0.0
github.com/syndtr/gocapability d98352740cb2c55f81556b63d4a1ec64c5a319c2
github.com/tchap/go-patricia v2.2.6
github.com/ulikunitz/xz v0.5.5
github.com/vbatts/tar-split v0.10.2
github.com/xeipuuv/gojsonpointer 4e3ac2762d5f479393488629ee9370b50873b3a6
github.com/xeipuuv/gojsonreference bd5ef7bd5415a7ac448318e64f11a24cd21e594b
github.com/xeipuuv/gojsonschema v1.1.0
golang.org/x/crypto ff983b9c42bc9fbf91556e191cc8efb585c16908 https://github.com/golang/crypto
golang.org/x/net 45ffb0cd1ba084b73e26dee67e667e1be5acce83 https://github.com/golang/net
golang.org/x/sync 37e7f081c4d4c64e13b10787722085407fe5d15f https://github.com/golang/sync
golang.org/x/sys 7fbe1cd0fcc20051e1fcb87fbabec4a1bacaaeba https://github.com/golang/sys
golang.org/x/text e6919f6577db79269a6443b9dc46d18f2238fb5d https://github.com/golang/text
gopkg.in/yaml.v2 v2.2.2
k8s.io/client-go kubernetes-1.10.13-beta.0 https://github.com/kubernetes/client-go
github.com/klauspost/pgzip v1.2.1
github.com/klauspost/compress v1.4.1
github.com/klauspost/cpuid v1.2.0
github.com/onsi/gomega v1.4.3
github.com/spf13/cobra v0.0.3
github.com/spf13/pflag v1.0.3
github.com/ishidawataru/sctp 07191f837fedd2f13d1ec7b5f885f0f3ec54b1cb
2 vendor/github.com/containers/image/README.md generated vendored
@@ -65,7 +65,7 @@ the primary downside is that creating new signatures with the Golang-only implem
 - `containers_image_ostree_stub`: Instead of importing `ostree:` transport in `github.com/containers/image/transports/alltransports`, use a stub which reports that the transport is not supported. This allows building the library without requiring the `libostree` development libraries. The `github.com/containers/image/ostree` package is completely disabled
 and impossible to import when this build tag is in use.
 
-## [Contributing](CONTRIBUTING.md)**
+## [Contributing](CONTRIBUTING.md)
 
 Information about contributing to this project.
34 vendor/github.com/containers/image/copy/copy.go generated vendored
@@ -468,7 +468,7 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error {
 	}
 
 	data := make([]copyLayerData, numLayers)
-	copyLayerHelper := func(index int, srcLayer types.BlobInfo, bar *mpb.Bar) {
+	copyLayerHelper := func(index int, srcLayer types.BlobInfo, pool *mpb.Progress) {
 		defer copySemaphore.Release(1)
 		defer copyGroup.Done()
 		cld := copyLayerData{}
@@ -483,24 +483,18 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error {
 				logrus.Debugf("Skipping foreign layer %q copy to %s", cld.destInfo.Digest, ic.c.dest.Reference().Transport().Name())
 			}
 		} else {
-			cld.destInfo, cld.diffID, cld.err = ic.copyLayer(ctx, srcLayer, bar)
+			cld.destInfo, cld.diffID, cld.err = ic.copyLayer(ctx, srcLayer, pool)
 		}
 		data[index] = cld
-		bar.SetTotal(srcLayer.Size, true)
 	}
 
 	func() { // A scope for defer
 		progressPool, progressCleanup := ic.c.newProgressPool(ctx)
 		defer progressCleanup()
-
-		progressBars := make([]*mpb.Bar, numLayers)
-		for i, srcInfo := range srcInfos {
-			progressBars[i] = ic.c.createProgressBar(progressPool, srcInfo, "blob")
-		}
-
 		for i, srcLayer := range srcInfos {
 			copySemaphore.Acquire(ctx, 1)
-			go copyLayerHelper(i, srcLayer, progressBars[i])
+			go copyLayerHelper(i, srcLayer, progressPool)
 		}
 
 		// Wait for all layers to be copied
@@ -592,7 +586,7 @@ func (c *copier) newProgressPool(ctx context.Context) (*mpb.Progress, func()) {
 
 // createProgressBar creates a mpb.Bar in pool. Note that if the copier's reportWriter
 // is ioutil.Discard, the progress bar's output will be discarded
-func (c *copier) createProgressBar(pool *mpb.Progress, info types.BlobInfo, kind string) *mpb.Bar {
+func (c *copier) createProgressBar(pool *mpb.Progress, info types.BlobInfo, kind string, onComplete string) *mpb.Bar {
 	// shortDigestLen is the length of the digest used for blobs.
 	const shortDigestLen = 12
 
@@ -604,11 +598,12 @@ func (c *copier) createProgressBar(pool *mpb.Progress, info types.BlobInfo, kind
 	}
 
 	bar := pool.AddBar(info.Size,
+		mpb.BarClearOnComplete(),
 		mpb.PrependDecorators(
 			decor.Name(prefix),
 		),
 		mpb.AppendDecorators(
-			decor.CountersKibiByte("%.1f / %.1f"),
+			decor.OnComplete(decor.CountersKibiByte("%.1f / %.1f"), " "+onComplete),
 		),
 	)
 	if c.progressOutput == ioutil.Discard {
@@ -629,7 +624,7 @@ func (c *copier) copyConfig(ctx context.Context, src types.Image) error {
 		destInfo, err := func() (types.BlobInfo, error) { // A scope for defer
 			progressPool, progressCleanup := c.newProgressPool(ctx)
 			defer progressCleanup()
-			bar := c.createProgressBar(progressPool, srcInfo, "config")
+			bar := c.createProgressBar(progressPool, srcInfo, "config", "done")
 			destInfo, err := c.copyBlobFromStream(ctx, bytes.NewReader(configBlob), srcInfo, nil, false, true, bar)
 			if err != nil {
 				return types.BlobInfo{}, err
@@ -656,7 +651,7 @@ type diffIDResult struct {
 
 // copyLayer copies a layer with srcInfo (with known Digest and possibly known Size) in src to dest, perhaps compressing it if canCompress,
 // and returns a complete blobInfo of the copied layer, and a value for LayerDiffIDs if diffIDIsNeeded
-func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, bar *mpb.Bar) (types.BlobInfo, digest.Digest, error) {
+func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, pool *mpb.Progress) (types.BlobInfo, digest.Digest, error) {
 	cachedDiffID := ic.c.blobInfoCache.UncompressedDigest(srcInfo.Digest) // May be ""
 	diffIDIsNeeded := ic.diffIDsAreNeeded && cachedDiffID == ""
 
@@ -668,6 +663,8 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, ba
 		}
 		if reused {
 			logrus.Debugf("Skipping blob %s (already present):", srcInfo.Digest)
+			bar := ic.c.createProgressBar(pool, srcInfo, "blob", "skipped: already exists")
+			bar.SetTotal(0, true)
 			return blobInfo, cachedDiffID, nil
 		}
 	}
@@ -679,10 +676,14 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, ba
 	}
 	defer srcStream.Close()
 
+	bar := ic.c.createProgressBar(pool, srcInfo, "blob", "done")
+
 	blobInfo, diffIDChan, err := ic.copyLayerFromStream(ctx, srcStream, types.BlobInfo{Digest: srcInfo.Digest, Size: srcBlobSize}, diffIDIsNeeded, bar)
 	if err != nil {
 		return types.BlobInfo{}, "", err
 	}
 
+	diffID := cachedDiffID
 	if diffIDIsNeeded {
 		select {
 		case <-ctx.Done():
@@ -695,11 +696,12 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, ba
 			// This is safe because we have just computed diffIDResult.Digest ourselves, and in the process
 			// we have read all of the input blob, so srcInfo.Digest must have been validated by digestingReader.
 			ic.c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, diffIDResult.digest)
-			return blobInfo, diffIDResult.digest, nil
+			diffID = diffIDResult.digest
 		}
-	} else {
-		return blobInfo, cachedDiffID, nil
 	}
+
+	bar.SetTotal(srcInfo.Size, true)
+	return blobInfo, diffID, nil
 }
 
 // copyLayerFromStream is an implementation detail of copyLayer; mostly providing a separate “defer” scope.
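The copy.go change above threads the mpb progress pool, rather than pre-built bars, into copyLayer, so each layer can end its bar with a per-layer state message ("done" or "skipped: already exists"). A small self-contained sketch of the decorator pattern the new code relies on, with an invented size and digest purely for illustration:

```go
package main

import (
	"time"

	"github.com/vbauerster/mpb"
	"github.com/vbauerster/mpb/decor"
)

func main() {
	pool := mpb.New() // analogous to the pool returned by newProgressPool

	// Mirrors createProgressBar: a name decorator up front, and a byte
	// counter that is replaced by a completion message once the bar fills.
	bar := pool.AddBar(1024*1024,
		mpb.BarClearOnComplete(),
		mpb.PrependDecorators(decor.Name("Copying blob deadbeef0123 ")),
		mpb.AppendDecorators(
			decor.OnComplete(decor.CountersKibiByte("%.1f / %.1f"), " done"),
		),
	)

	// Simulate a copy; the real code wraps the source stream in a proxy
	// reader so progress advances as bytes are consumed.
	for i := 0; i < 16; i++ {
		bar.IncrBy(64 * 1024)
		time.Sleep(10 * time.Millisecond)
	}
	pool.Wait()
}
```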
73 vendor/github.com/containers/image/docker/docker_client.go generated vendored
@@ -23,7 +23,7 @@ import (
 	"github.com/containers/image/types"
 	"github.com/docker/distribution/registry/client"
 	"github.com/docker/go-connections/tlsconfig"
-	"github.com/opencontainers/go-digest"
+	digest "github.com/opencontainers/go-digest"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 )
@@ -81,28 +81,30 @@ type bearerToken struct {
 // dockerClient is configuration for dealing with a single Docker registry.
 type dockerClient struct {
 	// The following members are set by newDockerClient and do not change afterwards.
-	sys                   *types.SystemContext
-	registry              string
-	client                *http.Client
-	insecureSkipTLSVerify bool
+	sys      *types.SystemContext
+	registry string
+
+	// tlsClientConfig is setup by newDockerClient and will be used and updated
+	// by detectProperties(). Callers can edit tlsClientConfig.InsecureSkipVerify in the meantime.
+	tlsClientConfig *tls.Config
 	// The following members are not set by newDockerClient and must be set by callers if needed.
 	username      string
 	password      string
 	signatureBase signatureStorageBase
 	scope         authScope
 
 	// The following members are detected registry properties:
 	// They are set after a successful detectProperties(), and never change afterwards.
-	scheme             string // Empty value also used to indicate detectProperties() has not yet succeeded.
+	client             *http.Client
+	scheme             string
 	challenges         []challenge
 	supportsSignatures bool
 
 	// Private state for setupRequestAuth (key: string, value: bearerToken)
 	tokenCache sync.Map
-	// detectPropertiesError caches the initial error.
-	detectPropertiesError error
-	// detectPropertiesOnce is used to execuute detectProperties() at most once in in makeRequest().
-	detectPropertiesOnce sync.Once
+	// Private state for detectProperties:
+	detectPropertiesOnce  sync.Once // detectPropertiesOnce is used to execute detectProperties() at most once.
+	detectPropertiesError error     // detectPropertiesError caches the initial error.
 }
@@ -197,7 +199,7 @@ func dockerCertDir(sys *types.SystemContext, hostPort string) (string, error) {
 // “write” specifies whether the client will be used for "write" access (in particular passed to lookaside.go:toplevelFromSection)
 func newDockerClientFromRef(sys *types.SystemContext, ref dockerReference, write bool, actions string) (*dockerClient, error) {
 	registry := reference.Domain(ref.ref)
-	username, password, err := config.GetAuthentication(sys, reference.Domain(ref.ref))
+	username, password, err := config.GetAuthentication(sys, registry)
 	if err != nil {
 		return nil, errors.Wrapf(err, "error getting username and password")
 	}
@@ -229,8 +231,7 @@ func newDockerClient(sys *types.SystemContext, registry, reference string) (*doc
 	if registry == dockerHostname {
 		registry = dockerRegistry
 	}
-	tr := tlsclientconfig.NewTransport()
-	tr.TLSClientConfig = serverDefault()
+	tlsClientConfig := serverDefault()
 
 	// It is undefined whether the host[:port] string for dockerHostname should be dockerHostname or dockerRegistry,
 	// because docker/docker does not read the certs.d subdirectory at all in that case. We use the user-visible
@@ -241,38 +242,31 @@ func newDockerClient(sys *types.SystemContext, registry, reference string) (*doc
 	if err != nil {
 		return nil, err
 	}
-	if err := tlsclientconfig.SetupCertificates(certDir, tr.TLSClientConfig); err != nil {
+	if err := tlsclientconfig.SetupCertificates(certDir, tlsClientConfig); err != nil {
 		return nil, err
 	}
 
 	// Check if TLS verification shall be skipped (default=false) which can
-	// either be specified in the sysregistriesv2 configuration or via the
-	// SystemContext, whereas the SystemContext is prioritized.
+	// be specified in the sysregistriesv2 configuration.
 	skipVerify := false
-	if sys != nil && sys.DockerInsecureSkipTLSVerify != types.OptionalBoolUndefined {
-		// Only use the SystemContext if the actual value is defined.
-		skipVerify = sys.DockerInsecureSkipTLSVerify == types.OptionalBoolTrue
-	} else {
-		reg, err := sysregistriesv2.FindRegistry(sys, reference)
-		if err != nil {
-			return nil, errors.Wrapf(err, "error loading registries")
-		}
-		if reg != nil {
-			skipVerify = reg.Insecure
-		}
+	reg, err := sysregistriesv2.FindRegistry(sys, reference)
+	if err != nil {
+		return nil, errors.Wrapf(err, "error loading registries")
 	}
-	tr.TLSClientConfig.InsecureSkipVerify = skipVerify
+	if reg != nil {
+		skipVerify = reg.Insecure
+	}
+	tlsClientConfig.InsecureSkipVerify = skipVerify
 
 	return &dockerClient{
-		sys:                   sys,
-		registry:              registry,
-		client:                &http.Client{Transport: tr},
-		insecureSkipTLSVerify: skipVerify,
+		sys:             sys,
+		registry:        registry,
+		tlsClientConfig: tlsClientConfig,
 	}, nil
 }
 
 // CheckAuth validates the credentials by attempting to log into the registry
-// returns an error if an error occcured while making the http request or the status code received was 401
+// returns an error if an error occurred while making the http request or the status code received was 401
 func CheckAuth(ctx context.Context, sys *types.SystemContext, username, password, registry string) error {
 	client, err := newDockerClient(sys, registry, registry)
 	if err != nil {
@@ -557,9 +551,14 @@ func (c *dockerClient) getBearerToken(ctx context.Context, challenge challenge,
 // detectPropertiesHelper performs the work of detectProperties which executes
 // it at most once.
 func (c *dockerClient) detectPropertiesHelper(ctx context.Context) error {
-	if c.scheme != "" {
-		return nil
+	// We overwrite the TLS clients `InsecureSkipVerify` only if explicitly
+	// specified by the system context
+	if c.sys != nil && c.sys.DockerInsecureSkipTLSVerify != types.OptionalBoolUndefined {
+		c.tlsClientConfig.InsecureSkipVerify = c.sys.DockerInsecureSkipTLSVerify == types.OptionalBoolTrue
 	}
+	tr := tlsclientconfig.NewTransport()
+	tr.TLSClientConfig = c.tlsClientConfig
+	c.client = &http.Client{Transport: tr}
 
 	ping := func(scheme string) error {
 		url := fmt.Sprintf(resolvedPingV2URL, scheme, c.registry)
@@ -579,7 +578,7 @@ func (c *dockerClient) detectPropertiesHelper(ctx context.Context) error {
 		return nil
 	}
 	err := ping("https")
-	if err != nil && c.insecureSkipTLSVerify {
+	if err != nil && c.tlsClientConfig.InsecureSkipVerify {
 		err = ping("http")
 	}
 	if err != nil {
@@ -603,7 +602,7 @@ func (c *dockerClient) detectPropertiesHelper(ctx context.Context) error {
 			return true
 		}
 		isV1 := pingV1("https")
-		if !isV1 && c.insecureSkipTLSVerify {
+		if !isV1 && c.tlsClientConfig.InsecureSkipVerify {
 			isV1 = pingV1("http")
 		}
 		if isV1 {
2 vendor/github.com/containers/image/docker/docker_image.go generated vendored
@@ -25,7 +25,7 @@ type Image struct {
 // a client to the registry hosting the given image.
 // The caller must call .Close() on the returned Image.
 func newImage(ctx context.Context, sys *types.SystemContext, ref dockerReference) (types.ImageCloser, error) {
-	s, err := newImageSource(sys, ref)
+	s, err := newImageSource(ctx, sys, ref)
 	if err != nil {
 		return nil, err
 	}
4 vendor/github.com/containers/image/docker/docker_image_dest.go generated vendored
@@ -16,7 +16,7 @@ import (
 
 	"github.com/containers/image/docker/reference"
 	"github.com/containers/image/manifest"
-	"github.com/containers/image/pkg/blobinfocache"
+	"github.com/containers/image/pkg/blobinfocache/none"
 	"github.com/containers/image/types"
 	"github.com/docker/distribution/registry/api/errcode"
 	"github.com/docker/distribution/registry/api/v2"
@@ -129,7 +129,7 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader,
 	// This should not really be necessary, at least the copy code calls TryReusingBlob automatically.
 	// Still, we need to check, if only because the "initiate upload" endpoint does not have a documented "blob already exists" return value.
 	// But we do that with NoCache, so that it _only_ checks the primary destination, instead of trying all mount candidates _again_.
-	haveBlob, reusedInfo, err := d.TryReusingBlob(ctx, inputInfo, blobinfocache.NoCache, false)
+	haveBlob, reusedInfo, err := d.TryReusingBlob(ctx, inputInfo, none.NoCache, false)
 	if err != nil {
 		return types.BlobInfo{}, err
 	}
98 vendor/github.com/containers/image/docker/docker_image_src.go generated vendored
@@ -13,9 +13,10 @@ import (
 
 	"github.com/containers/image/docker/reference"
 	"github.com/containers/image/manifest"
+	"github.com/containers/image/pkg/sysregistriesv2"
 	"github.com/containers/image/types"
 	"github.com/docker/distribution/registry/client"
-	"github.com/opencontainers/go-digest"
+	digest "github.com/opencontainers/go-digest"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 )
@@ -28,17 +29,94 @@ type dockerImageSource struct {
 	cachedManifestMIMEType string // Only valid if cachedManifest != nil
 }
 
-// newImageSource creates a new ImageSource for the specified image reference.
-// The caller must call .Close() on the returned ImageSource.
-func newImageSource(sys *types.SystemContext, ref dockerReference) (*dockerImageSource, error) {
-	c, err := newDockerClientFromRef(sys, ref, false, "pull")
+// newImageSource creates a new `ImageSource` for the specified image reference
+// `ref`.
+//
+// The following steps will be done during the instance creation:
+//
+// - Lookup the registry within the configured location in
+//   `sys.SystemRegistriesConfPath`. If there is no configured registry available,
+//   we fallback to the provided docker reference `ref`.
+//
+// - References which contain a configured prefix will be automatically rewritten
+//   to the correct target reference. For example, if the configured
+//   `prefix = "example.com/foo"`, `location = "example.com"` and the image will be
+//   pulled from the ref `example.com/foo/image`, then the resulting pull will
+//   effectively point to `example.com/image`.
+//
+// - If the rewritten reference succeeds, it will be used as the `dockerRef`
+//   in the client. If the rewrite fails, the function immediately returns an error.
+//
+// - Each mirror will be used (in the configured order) to test the
+//   availability of the image manifest on the remote location. For example,
+//   if the manifest is not reachable due to connectivity issues, then the next
+//   mirror will be tested instead. If no mirror is configured or contains the
+//   target manifest, then the initial `ref` will be tested as fallback. The
+//   creation of the new `dockerImageSource` only succeeds if a remote
+//   location with the available manifest was found.
+//
+// A cleanup call to `.Close()` is needed if the caller is done using the returned
+// `ImageSource`.
+func newImageSource(ctx context.Context, sys *types.SystemContext, ref dockerReference) (*dockerImageSource, error) {
+	registry, err := sysregistriesv2.FindRegistry(sys, ref.ref.Name())
 	if err != nil {
-		return nil, err
+		return nil, errors.Wrapf(err, "error loading registries configuration")
 	}
-	return &dockerImageSource{
-		ref: ref,
-		c:   c,
-	}, nil
+
+	if registry == nil {
+		// No configuration was found for the provided reference, so we create
+		// a fallback registry by hand to make the client creation below work
+		// as intended.
+		registry = &sysregistriesv2.Registry{
+			Endpoint: sysregistriesv2.Endpoint{
+				Location: ref.ref.String(),
+			},
+			Prefix: ref.ref.String(),
+		}
+	}
+
+	primaryDomain := reference.Domain(ref.ref)
+	// Found the registry within the sysregistriesv2 configuration. Now we test
+	// all endpoints for the manifest availability. If a working image source
+	// was found, it will be used for all future pull actions.
+	manifestLoadErr := errors.New("Internal error: newImageSource returned without trying any endpoint")
+	for _, endpoint := range append(registry.Mirrors, registry.Endpoint) {
+		logrus.Debugf("Trying to pull %q from endpoint %q", ref.ref, endpoint.Location)
+
+		newRef, err := endpoint.RewriteReference(ref.ref, registry.Prefix)
+		if err != nil {
+			return nil, err
+		}
+		dockerRef, err := newReference(newRef)
+		if err != nil {
+			return nil, err
+		}
+
+		endpointSys := sys
+		// sys.DockerAuthConfig does not explicitly specify a registry; we must not blindly send the credentials intended for the primary endpoint to mirrors.
+		if endpointSys != nil && endpointSys.DockerAuthConfig != nil && reference.Domain(dockerRef.ref) != primaryDomain {
+			copy := *endpointSys
+			copy.DockerAuthConfig = nil
+			endpointSys = &copy
+		}
+
+		client, err := newDockerClientFromRef(endpointSys, dockerRef, false, "pull")
+		if err != nil {
+			return nil, err
+		}
+		client.tlsClientConfig.InsecureSkipVerify = endpoint.Insecure
+
+		testImageSource := &dockerImageSource{
+			ref: dockerRef,
+			c:   client,
+		}
+
+		manifestLoadErr = testImageSource.ensureManifestIsLoaded(ctx)
+		if manifestLoadErr == nil {
+			return testImageSource, nil
+		}
+	}
+	return nil, manifestLoadErr
+}
 
 // Reference returns the reference used to set up this source, _as specified by the user_
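The lookup-and-rewrite sequence documented above can be exercised on its own. A hedged sketch of that resolution step, using the same sysregistriesv2 calls as newImageSource; the configuration path and image name are invented for illustration:

```go
package main

import (
	"fmt"

	"github.com/containers/image/docker/reference"
	"github.com/containers/image/pkg/sysregistriesv2"
	"github.com/containers/image/types"
)

func main() {
	// Point the lookup at a hypothetical configuration file.
	sys := &types.SystemContext{SystemRegistriesConfPath: "/etc/containers/registries.conf"}

	named, err := reference.ParseNormalizedNamed("example.com/foo/image:latest")
	if err != nil {
		panic(err)
	}

	// The same lookup newImageSource performs before trying mirrors.
	registry, err := sysregistriesv2.FindRegistry(sys, named.Name())
	if err != nil || registry == nil {
		fmt.Println("no registry configuration; the reference is used as-is")
		return
	}

	// Mirrors are tried in order, with the primary endpoint as the fallback.
	for _, endpoint := range append(registry.Mirrors, registry.Endpoint) {
		rewritten, err := endpoint.RewriteReference(named, registry.Prefix)
		if err != nil {
			continue
		}
		fmt.Printf("would try %s\n", rewritten.String())
	}
}
```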
12 vendor/github.com/containers/image/docker/docker_transport.go generated vendored
@@ -61,8 +61,13 @@ func ParseReference(refString string) (types.ImageReference, error) {
 
 // NewReference returns a Docker reference for a named reference. The reference must satisfy !reference.IsNameOnly().
 func NewReference(ref reference.Named) (types.ImageReference, error) {
+	return newReference(ref)
+}
+
+// newReference returns a dockerReference for a named reference.
+func newReference(ref reference.Named) (dockerReference, error) {
 	if reference.IsNameOnly(ref) {
-		return nil, errors.Errorf("Docker reference %s has neither a tag nor a digest", reference.FamiliarString(ref))
+		return dockerReference{}, errors.Errorf("Docker reference %s has neither a tag nor a digest", reference.FamiliarString(ref))
 	}
 	// A github.com/distribution/reference value can have a tag and a digest at the same time!
 	// The docker/distribution API does not really support that (we can’t ask for an image with a specific
@@ -72,8 +77,9 @@ func NewReference(ref reference.Named) (types.ImageReference, error) {
 	_, isTagged := ref.(reference.NamedTagged)
 	_, isDigested := ref.(reference.Canonical)
 	if isTagged && isDigested {
-		return nil, errors.Errorf("Docker references with both a tag and digest are currently not supported")
+		return dockerReference{}, errors.Errorf("Docker references with both a tag and digest are currently not supported")
 	}
+
 	return dockerReference{
 		ref: ref,
 	}, nil
@@ -135,7 +141,7 @@ func (ref dockerReference) NewImage(ctx context.Context, sys *types.SystemContex
 // NewImageSource returns a types.ImageSource for this reference.
 // The caller must call .Close() on the returned ImageSource.
 func (ref dockerReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) {
-	return newImageSource(sys, ref)
+	return newImageSource(ctx, sys, ref)
 }
 
 // NewImageDestination returns a types.ImageDestination for this reference.
28 vendor/github.com/containers/image/docs/containers-certs.d.5.md generated vendored Normal file
@@ -0,0 +1,28 @@
% containers-certs.d(5)

# NAME
containers-certs.d - Directory for storing custom container-registry TLS configurations

# DESCRIPTION
A custom TLS configuration for a container registry can be configured by creating a directory under `/etc/containers/certs.d`.
The name of the directory must correspond to the `host:port` of the registry (e.g., `my-registry.com:5000`).

## Directory Structure
A certs directory can contain one or more files with the following extensions:

* `*.crt` files will be interpreted as CA certificates
* `*.cert` files will be interpreted as client certificates
* `*.key` files will be interpreted as client keys

Note that the client certificate-key pair will be selected by the file name (e.g., `client.{cert,key}`).
An example setup for a registry running at `my-registry.com:5000` may look as follows:
```
/etc/containers/certs.d/    <- Certificate directory
└── my-registry.com:5000    <- Hostname:port
    ├── client.cert         <- Client certificate
    ├── client.key          <- Client key
    └── ca.crt              <- Certificate authority that signed the registry certificate
```
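Once the directory exists, a client tool can be pointed at it when contacting the registry. As an illustration (assuming skopeo's certificate-directory flag as documented in skopeo-inspect(1); `my-image` is a placeholder):
```
$ skopeo inspect --cert-dir /etc/containers/certs.d/my-registry.com:5000 \
    docker://my-registry.com:5000/my-image:latest
```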
# HISTORY
Feb 2019, Originally compiled by Valentin Rothberg <rothberg@redhat.com>
77 vendor/github.com/containers/image/docs/containers-registries.conf.5.md generated vendored
@@ -7,14 +7,68 @@ containers-registries.conf - Syntax of System Registry Configuration File

# DESCRIPTION
The CONTAINERS-REGISTRIES configuration file is a system-wide configuration
file for container image registries. The file format is TOML. The valid
categories are: 'registries.search', 'registries.insecure', and
'registries.block'.
file for container image registries. The file format is TOML.

By default, the configuration file is located at `/etc/containers/registries.conf`.

# FORMAT
The TOML format is used to build a simple list format for registries under three
# FORMATS

## VERSION 2
VERSION 2 is the latest format of `registries.conf` and is currently in
beta. This means that, in general, VERSION 1 should still be used in
production environments for now.

Every registry can have its own mirrors configured. The mirrors are tested
in order for the availability of the remote manifest; this currently happens
only during an image pull. If the manifest is not reachable due to connectivity
issues or because it is unavailable on the mirror, then the next mirror is
tested instead. If no mirror is configured or none contains the manifest to be
pulled, then the initially provided reference is used as a fallback. It is
possible to set the `insecure` option per mirror, too.

Furthermore, it is possible to specify a `prefix` for a registry. The `prefix`
is used to find the relevant target registry from which the image has to be
pulled. During the test for the availability of the image, the prefixed
location is rewritten to the correct remote location. This applies to
mirrors as well as the fallback `location`. If no prefix is specified, it
defaults to the specified `location`. For example, if
`prefix = "example.com/foo"`, `location = "example.com"` and the image is
pulled as `example.com/foo/image`, then the resulting pull will effectively
point to `example.com/image`.

By default, container runtimes use TLS when retrieving images from a registry.
If the registry is not set up with TLS, then the container runtime will fail to
pull images from the registry. If you set `insecure = true` for a registry or a
mirror, you override the `insecure` flag for that specific entry. This means
that the container runtime will attempt to use unencrypted HTTP to pull the
image. It also allows you to pull from a registry with self-signed certificates.

If you set `unqualified-search = true` for a registry, then it is possible
to omit the registry hostname when pulling images. This feature does not work
together with a specified `prefix`.

If `blocked = true`, then pulling images from that registry is not allowed.

### EXAMPLE

```
[[registry]]
location = "example.com"
insecure = false
prefix = "example.com/foo"
unqualified-search = false
blocked = false
mirror = [
  { location = "example-mirror-0.local", insecure = false },
  { location = "example-mirror-1.local", insecure = true }
]
```
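Programs normally consume this file through the containers/image `sysregistriesv2` package rather than parsing the TOML themselves. A minimal sketch, assuming the `GetRegistries` helper and the `Registry`/`Endpoint` field names from the vendored Go changes later in this diff:
```
package main

import (
	"fmt"

	"github.com/containers/image/pkg/sysregistriesv2"
	"github.com/containers/image/types"
)

func main() {
	// An empty SystemContext selects the default /etc/containers/registries.conf.
	registries, err := sysregistriesv2.GetRegistries(&types.SystemContext{})
	if err != nil {
		panic(err)
	}
	for _, reg := range registries {
		fmt.Printf("registry %q (prefix %q, insecure=%v, blocked=%v)\n",
			reg.Location, reg.Prefix, reg.Insecure, reg.Blocked)
		for _, mirror := range reg.Mirrors {
			fmt.Printf("  mirror %q (insecure=%v)\n", mirror.Location, mirror.Insecure)
		}
	}
}
```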

## VERSION 1
VERSION 1 can be used as an alternative to VERSION 2, but it does not support
registry mirrors or a prefix.

The TOML format is used to build a simple list for registries under three
categories: `registries.search`, `registries.insecure`, and `registries.block`.
You can list multiple registries using a comma-separated list.

@@ -22,18 +76,13 @@ Search registries are used when the caller of a container runtime does not fully
container image that they want to execute. These registries are prepended onto the front
of the specified container image until the named image is found at a registry.

Insecure Registries. By default container runtimes use TLS when retrieving images
from a registry. If the registry is not set up with TLS, then the container runtime
will fail to pull images from the registry. If you add the registry to the list of
insecure registries then the container runtime will attempt to use standard web protocols to
pull the image. It also allows you to pull from a registry with self-signed certificates.
Note insecure registries can be used for any registry, not just the registries listed
under search.

Block Registries. The registries in this category are not pulled from when
retrieving images.
The fields `registries.insecure` and `registries.block` work like the
`insecure` and `blocked` fields from VERSION 2.

# EXAMPLE
### EXAMPLE
The following example configuration defines two searchable registries, one
insecure registry, and two blocked registries.

@@ -49,6 +98,8 @@ registries = ['registry.untrusted.com', 'registry.unsafe.com']
```

# HISTORY
Mar 2019, Added additional configuration format by Sascha Grunert <sgrunert@suse.com>

Aug 2018, Renamed to containers-registries.conf(5) by Valentin Rothberg <vrothberg@suse.com>

Jun 2018, Updated by Tom Sweeney <tsweeney@redhat.com>
@@ -1,10 +1,10 @@
% atomic-signature(5) Atomic signature format
% container-signature(5) Container signature format
% Miloslav Trmač
% March 2017

# Atomic signature format
# Container signature format

This document describes the format of “atomic” container signatures,
This document describes the format of container signatures,
as implemented by the `github.com/containers/image/signature` package.

Most users should be able to consume these signatures by using the `github.com/containers/image/signature` package
@@ -21,7 +21,7 @@ an automated build system as a result of an automated build,
a company IT department approving the image for production) under a specified _identity_
(e.g. an OS base image / specific application, with a specific version).

An atomic container signature consists of a cryptographic signature which identifies
A container signature consists of a cryptographic signature which identifies
and authenticates who signed the image, and carries as a signed payload a JSON document.
The JSON document identifies the image being signed, claims a specific identity of the
image and if applicable, contains other information about the image.
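For orientation, a signed payload of this shape looks roughly as follows (hypothetical digest and reference; the individual members are defined later in this document, and the historical `critical.type` string is assumed to be retained on the wire):
```
{
    "critical": {
        "type": "atomic container signature",
        "image": {
            "docker-manifest-digest": "sha256:817a12c32a39bbe394944ba49de563e085f1d3c5266eb8e9723256bc4448680e"
        },
        "identity": {
            "docker-reference": "docker.io/library/busybox:latest"
        }
    },
    "optional": {
        "creator": "some software package v1.0.1-35",
        "timestamp": 1483228800
    }
}
```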
@@ -34,7 +34,7 @@ associating more than one signature with an image.

## The cryptographic signature

As distributed, the atomic container signature is a blob which contains a cryptographic signature
As distributed, the container signature is a blob which contains a cryptographic signature
in an industry-standard format, carrying a signed JSON payload (i.e. the blob contains both the
JSON document and a signature of the JSON document; it is not a “detached signature” with
independent blobs containing the JSON document and a cryptographic signature).
@@ -46,7 +46,7 @@ that this is not necessary and the configured expected public key provides anoth
of the expected cryptographic signature format. Such metadata may be added in the future for
newly added cryptographic signature formats, if necessary.)

Consumers of atomic container signatures SHOULD verify the cryptographic signature
Consumers of container signatures SHOULD verify the cryptographic signature
against one or more trusted public keys
(e.g. defined in a [policy.json signature verification policy file](policy.json.md))
before parsing or processing the JSON payload in _any_ way,
@@ -89,7 +89,7 @@ and if the semantics of the invalid document, as created by such an implementati
The top-level value of the JSON document MUST be a JSON object with exactly two members, `critical` and `optional`,
each a JSON object.

The `critical` object MUST contain a `type` member identifying the document as an atomic container signature
The `critical` object MUST contain a `type` member identifying the document as a container signature
(as defined [below](#criticaltype))
and signature consumers MUST reject signatures which do not have this member or in which this member does not have the expected value.

@@ -210,7 +210,7 @@ Consumers still SHOULD reject any signature where a member of an `optional` obje

If present, this MUST be a JSON string, identifying the name and version of the software which has created the signature.

The contents of this string is not defined in detail; however each implementation creating atomic container signatures:
The contents of this string is not defined in detail; however each implementation creating container signatures:

- SHOULD define the contents to unambiguously define the software in practice (e.g. it SHOULD contain the name of the software, not only the version number)
- SHOULD use a build and versioning process which ensures that the contents of this string (e.g. an included version number)
@@ -221,7 +221,7 @@ The contents of this string is not defined in detail; however each implementatio
(e.g. the version of the implementation SHOULD NOT be only a git hash, because they don’t have an easily defined ordering;
the string should contain a version number, or at least a date of the commit).

Consumers of atomic container signatures MAY recognize specific values or sets of values of `optional.creator`
Consumers of container signatures MAY recognize specific values or sets of values of `optional.creator`
(perhaps augmented with `optional.timestamp`),
and MAY change their processing of the signature based on these values
(usually to accommodate violations of this specification in past versions of the signing software which cannot be fixed retroactively),
109 vendor/github.com/containers/image/docs/containers-transports.5.md generated vendored Normal file
@@ -0,0 +1,109 @@
% CONTAINERS-TRANSPORTS(5) Containers Transports Man Page
% Valentin Rothberg
% April 2019

## NAME

containers-transports - description of supported transports for copying and storing container images

## DESCRIPTION

Tools which use the containers/image library, including skopeo(1), buildah(1), and podman(1), all share a common syntax for referring to container images in various locations.
The general form of the syntax is _transport:details_, where the details depend on the specified transport; the supported transports are documented below.

### **containers-storage:** [storage-specifier]{image-id|docker-reference[@image-id]}

An image located in a local containers storage.
The format of _docker-reference_ is described in detail in the **docker** transport.

The _storage-specifier_ allows for referencing storage locations on the file system and has the format `[[driver@]root[+run-root][:options]]` where the optional `driver` refers to the storage driver (e.g., overlay or btrfs) and where `root` is an absolute path to the storage's root directory.
The optional `run-root` can be used to specify the run directory of the storage where all temporary writable content is stored.
The optional `options` are a comma-separated list of driver-specific options.
Please refer to containers-storage.conf(5) for further information on the drivers and supported options.
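For example, a reference that selects the overlay driver with an explicit root and run-root (the paths shown here are merely the common defaults, used for illustration):
```
containers-storage:[overlay@/var/lib/containers/storage+/run/containers/storage]busybox:latest
```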
### **dir:**_path_

An existing local directory _path_ storing the manifest, layer tarballs and signatures as individual files.
This is a non-standardized format, primarily useful for debugging or noninvasive container inspection.

### **docker://**_docker-reference_

An image in a registry implementing the "Docker Registry HTTP API V2".
By default, uses the authorization state in `$XDG_RUNTIME_DIR/containers/auth.json`, which is set using podman-login(1).
If the authorization state is not found there, `$HOME/.docker/config.json` is checked, which is set using docker-login(1).
The containers-registries.conf(5) file further allows for configuring various settings of a registry.

Note that a _docker-reference_ has the following format: `name[:tag|@digest]`.
While the docker transport does not support both a tag and a digest at the same time, some formats like containers-storage do.
Digests can also be used in an image destination as long as the manifest matches the provided digest.
The digest of images can be explored with skopeo-inspect(1).
If `name` does not contain a slash, it is treated as `docker.io/library/name`.
Otherwise, the component before the first slash is checked to see whether it is recognized as a `hostname[:port]` (i.e., it contains either a . or a :, or the component is exactly localhost).
If the first component of name is not recognized as a `hostname[:port]`, `name` is treated as `docker.io/name`.
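Applying these rules, a few illustrative normalizations:
```
alpine                -> docker.io/library/alpine    (no slash)
busybox/uclibc        -> docker.io/busybox/uclibc    (first component is not a hostname)
quay.io/skopeo/stable -> quay.io/skopeo/stable       (first component contains a ".")
localhost/test        -> localhost/test              (first component is exactly "localhost")
```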

### **docker-archive:**_path[:docker-reference]_

An image stored in a docker-save(1) formatted file.
_docker-reference_ is only used when creating such a file, and it must not contain a digest.
It is further possible to copy data from stdin by specifying `docker-archive:/dev/stdin`, but note that the used file must be seekable.

### **docker-daemon:**_docker-reference|algo:digest_

An image stored in the docker daemon's internal storage.
The image must be specified as a _docker-reference_ or in an alternative _algo:digest_ format when being used as an image source.
The _algo:digest_ refers to the image ID reported by docker-inspect(1).

### **oci:**_path[:tag]_

An image compliant with the "Open Container Image Layout Specification" at _path_.
Using a _tag_ is optional and allows for storing multiple images at the same _path_.

### **oci-archive:**_path[:tag]_

An image compliant with the "Open Container Image Layout Specification" stored as a tar(1) archive at _path_.

### **ostree:**_docker-reference[@/absolute/repo/path]_

An image in the local ostree(1) repository.
_/absolute/repo/path_ defaults to _/ostree/repo_.

## EXAMPLES

The following examples demonstrate how some of the containers transports can be used.
The examples use skopeo-copy(1) for copying container images.

**Copying an image from one registry to another**:
```
$ skopeo copy docker://docker.io/library/alpine:latest docker://localhost:5000/alpine:latest
```

**Copying an image from a running Docker daemon to a directory in the OCI layout**:
```
$ mkdir alpine-oci
$ skopeo copy docker-daemon:alpine:latest oci:alpine-oci
$ tree alpine-oci
alpine-oci/
├── blobs
│   └── sha256
│       ├── 83ef92b73cf4595aa7fe214ec6747228283d585f373d8f6bc08d66bebab531b7
│       ├── 9a6259e911dcd0a53535a25a9760ad8f2eded3528e0ad5604c4488624795cecc
│       └── ff8df268d29ccbe81cdf0a173076dcfbbea4bb2b6df1dd26766a73cb7b4ae6f7
├── index.json
└── oci-layout

2 directories, 5 files
```

**Copying an image from a registry to the local storage**:
```
$ skopeo copy docker://docker.io/library/alpine:latest containers-storage:alpine:latest
```

## SEE ALSO

docker-login(1), docker-save(1), ostree(1), podman-login(1), skopeo-copy(1), skopeo-inspect(1), tar(1), containers-registries.conf(5), containers-storage.conf(5)

## AUTHORS

Miloslav Trmač <mitr@redhat.com>
Valentin Rothberg <rothberg@redhat.com>
6 vendor/github.com/containers/image/image/docker_schema2.go generated vendored
@@ -11,7 +11,7 @@ import (

	"github.com/containers/image/docker/reference"
	"github.com/containers/image/manifest"
	"github.com/containers/image/pkg/blobinfocache"
	"github.com/containers/image/pkg/blobinfocache/none"
	"github.com/containers/image/types"
	"github.com/opencontainers/go-digest"
	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
@@ -96,7 +96,7 @@ func (m *manifestSchema2) ConfigBlob(ctx context.Context) ([]byte, error) {
	if m.src == nil {
		return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestSchema2")
	}
	stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromSchema2Descriptor(m.m.ConfigDescriptor), blobinfocache.NoCache)
	stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromSchema2Descriptor(m.m.ConfigDescriptor), none.NoCache)
	if err != nil {
		return nil, err
	}
@@ -252,7 +252,7 @@ func (m *manifestSchema2) convertToManifestSchema1(ctx context.Context, dest typ
	logrus.Debugf("Uploading empty layer during conversion to schema 1")
	// Ideally we should update the relevant BlobInfoCache about this layer, but that would require passing it down here,
	// and anyway this blob is so small that it’s easier to just copy it than to worry about figuring out another location where to get it.
	info, err := dest.PutBlob(ctx, bytes.NewReader(GzippedEmptyLayer), types.BlobInfo{Digest: GzippedEmptyLayerDigest, Size: int64(len(GzippedEmptyLayer))}, blobinfocache.NoCache, false)
	info, err := dest.PutBlob(ctx, bytes.NewReader(GzippedEmptyLayer), types.BlobInfo{Digest: GzippedEmptyLayerDigest, Size: int64(len(GzippedEmptyLayer))}, none.NoCache, false)
	if err != nil {
		return nil, errors.Wrap(err, "Error uploading empty layer")
	}
4 vendor/github.com/containers/image/image/oci.go generated vendored
@@ -7,7 +7,7 @@ import (

	"github.com/containers/image/docker/reference"
	"github.com/containers/image/manifest"
	"github.com/containers/image/pkg/blobinfocache"
	"github.com/containers/image/pkg/blobinfocache/none"
	"github.com/containers/image/types"
	"github.com/opencontainers/go-digest"
	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
@@ -61,7 +61,7 @@ func (m *manifestOCI1) ConfigBlob(ctx context.Context) ([]byte, error) {
	if m.src == nil {
		return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestOCI1")
	}
	stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromOCI1Descriptor(m.m.Config), blobinfocache.NoCache)
	stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromOCI1Descriptor(m.m.Config), none.NoCache)
	if err != nil {
		return nil, err
	}
@@ -1,4 +1,5 @@
package blobinfocache
// Package boltdb implements a BlobInfoCache backed by BoltDB.
package boltdb

import (
	"fmt"
@@ -6,8 +7,9 @@ import (
	"sync"
	"time"

	"github.com/boltdb/bolt"
	"github.com/containers/image/pkg/blobinfocache/internal/prioritize"
	"github.com/containers/image/types"
	bolt "github.com/etcd-io/bbolt"
	"github.com/opencontainers/go-digest"
	"github.com/sirupsen/logrus"
)
@@ -81,22 +83,23 @@ func unlockPath(path string) {
	}
}

// boltDBCache si a BlobInfoCache implementation which uses a BoltDB file at the specified path.
// cache is a BlobInfoCache implementation which uses a BoltDB file at the specified path.
//
// Note that we don’t keep the database open across operations, because that would lock the file and block any other
// users; instead, we need to open/close it for every single write or lookup.
type boltDBCache struct {
type cache struct {
	path string
}

// NewBoltDBCache returns a BlobInfoCache implementation which uses a BoltDB file at path.
// Most users should call DefaultCache instead.
func NewBoltDBCache(path string) types.BlobInfoCache {
	return &boltDBCache{path: path}
// New returns a BlobInfoCache implementation which uses a BoltDB file at path.
//
// Most users should call blobinfocache.DefaultCache instead.
func New(path string) types.BlobInfoCache {
	return &cache{path: path}
}

// view returns runs the specified fn within a read-only transaction on the database.
func (bdc *boltDBCache) view(fn func(tx *bolt.Tx) error) (retErr error) {
func (bdc *cache) view(fn func(tx *bolt.Tx) error) (retErr error) {
	// bolt.Open(bdc.path, 0600, &bolt.Options{ReadOnly: true}) will, if the file does not exist,
	// nevertheless create it, but with an O_RDONLY file descriptor, try to initialize it, and fail — while holding
	// a read lock, blocking any future writes.
@@ -122,7 +125,7 @@ func (bdc *boltDBCache) view(fn func(tx *bolt.Tx) error) (retErr error) {
}

// update returns runs the specified fn within a read-write transaction on the database.
func (bdc *boltDBCache) update(fn func(tx *bolt.Tx) error) (retErr error) {
func (bdc *cache) update(fn func(tx *bolt.Tx) error) (retErr error) {
	lockPath(bdc.path)
	defer unlockPath(bdc.path)
	db, err := bolt.Open(bdc.path, 0600, nil)
@@ -139,7 +142,7 @@ func (bdc *boltDBCache) update(fn func(tx *bolt.Tx) error) (retErr error) {
}

// uncompressedDigest implements BlobInfoCache.UncompressedDigest within the provided read-only transaction.
func (bdc *boltDBCache) uncompressedDigest(tx *bolt.Tx, anyDigest digest.Digest) digest.Digest {
func (bdc *cache) uncompressedDigest(tx *bolt.Tx, anyDigest digest.Digest) digest.Digest {
	if b := tx.Bucket(uncompressedDigestBucket); b != nil {
		if uncompressedBytes := b.Get([]byte(anyDigest.String())); uncompressedBytes != nil {
			d, err := digest.Parse(string(uncompressedBytes))
@@ -166,7 +169,7 @@ func (bdc *boltDBCache) uncompressedDigest(tx *bolt.Tx, anyDigest digest.Digest)
// UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
// May return anyDigest if it is known to be uncompressed.
// Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
func (bdc *boltDBCache) UncompressedDigest(anyDigest digest.Digest) digest.Digest {
func (bdc *cache) UncompressedDigest(anyDigest digest.Digest) digest.Digest {
	var res digest.Digest
	if err := bdc.view(func(tx *bolt.Tx) error {
		res = bdc.uncompressedDigest(tx, anyDigest)
@@ -182,7 +185,7 @@ func (bdc *boltDBCache) UncompressedDigest(anyDigest digest.Digest) digest.Diges
// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
func (bdc *boltDBCache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) {
func (bdc *cache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) {
	_ = bdc.update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists(uncompressedDigestBucket)
		if err != nil {
@@ -219,7 +222,7 @@ func (bdc *boltDBCache) RecordDigestUncompressedPair(anyDigest digest.Digest, un

// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
// and can be reused given the opaque location data.
func (bdc *boltDBCache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) {
func (bdc *cache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) {
	_ = bdc.update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists(knownLocationsBucket)
		if err != nil {
@@ -248,8 +251,8 @@ func (bdc *boltDBCache) RecordKnownLocation(transport types.ImageTransport, scop
	}) // FIXME? Log error (but throttle the log volume on repeated accesses)?
}

// appendReplacementCandiates creates candidateWithTime values for digest in scopeBucket, and returns the result of appending them to candidates.
func (bdc *boltDBCache) appendReplacementCandidates(candidates []candidateWithTime, scopeBucket *bolt.Bucket, digest digest.Digest) []candidateWithTime {
// appendReplacementCandiates creates prioritize.CandidateWithTime values for digest in scopeBucket, and returns the result of appending them to candidates.
func (bdc *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, scopeBucket *bolt.Bucket, digest digest.Digest) []prioritize.CandidateWithTime {
	b := scopeBucket.Bucket([]byte(digest.String()))
	if b == nil {
		return candidates
@@ -259,12 +262,12 @@ func (bdc *boltDBCache) appendReplacementCandidates(candidates []candidateWithTi
		if err := t.UnmarshalBinary(v); err != nil {
			return err
		}
		candidates = append(candidates, candidateWithTime{
			candidate: types.BICReplacementCandidate{
		candidates = append(candidates, prioritize.CandidateWithTime{
			Candidate: types.BICReplacementCandidate{
				Digest:   digest,
				Location: types.BICLocationReference{Opaque: string(k)},
			},
			lastSeen: t,
			LastSeen: t,
		})
		return nil
	}) // FIXME? Log error (but throttle the log volume on repeated accesses)?
@@ -277,8 +280,8 @@ func (bdc *boltDBCache) appendReplacementCandidates(candidates []candidateWithTi
// If !canSubstitute, the returned cadidates will match the submitted digest exactly; if canSubstitute,
// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
// uncompressed digest.
func (bdc *boltDBCache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
	res := []candidateWithTime{}
func (bdc *cache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
	res := []prioritize.CandidateWithTime{}
	var uncompressedDigestValue digest.Digest // = ""
	if err := bdc.view(func(tx *bolt.Tx) error {
		scopeBucket := tx.Bucket(knownLocationsBucket)
@@ -325,5 +328,5 @@ func (bdc *boltDBCache) CandidateLocations(transport types.ImageTransport, scope
		return []types.BICReplacementCandidate{} // FIXME? Log err (but throttle the log volume on repeated accesses)?
	}

	return destructivelyPrioritizeReplacementCandidates(res, primaryDigest, uncompressedDigestValue)
	return prioritize.DestructivelyPrioritizeReplacementCandidates(res, primaryDigest, uncompressedDigestValue)
}
20 vendor/github.com/containers/image/pkg/blobinfocache/default.go generated vendored
@@ -4,7 +4,10 @@ import (
	"fmt"
	"os"
	"path/filepath"
	"strconv"

	"github.com/containers/image/pkg/blobinfocache/boltdb"
	"github.com/containers/image/pkg/blobinfocache/memory"
	"github.com/containers/image/types"
	"github.com/sirupsen/logrus"
)
@@ -45,19 +48,28 @@ func blobInfoCacheDir(sys *types.SystemContext, euid int) (string, error) {
	return filepath.Join(dataDir, "containers", "cache"), nil
}

func getRootlessUID() int {
	uidEnv := os.Getenv("_CONTAINERS_ROOTLESS_UID")
	if uidEnv != "" {
		u, _ := strconv.Atoi(uidEnv)
		return u
	}
	return os.Geteuid()
}

// DefaultCache returns the default BlobInfoCache implementation appropriate for sys.
func DefaultCache(sys *types.SystemContext) types.BlobInfoCache {
	dir, err := blobInfoCacheDir(sys, os.Geteuid())
	dir, err := blobInfoCacheDir(sys, getRootlessUID())
	if err != nil {
		logrus.Debugf("Error determining a location for %s, using a memory-only cache", blobInfoCacheFilename)
		return NewMemoryCache()
		return memory.New()
	}
	path := filepath.Join(dir, blobInfoCacheFilename)
	if err := os.MkdirAll(dir, 0700); err != nil {
		logrus.Debugf("Error creating parent directories for %s, using a memory-only cache: %v", blobInfoCacheFilename, err)
		return NewMemoryCache()
		return memory.New()
	}

	logrus.Debugf("Using blob info cache at %s", path)
	return NewBoltDBCache(path)
	return boltdb.New(path)
}
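A minimal usage sketch of the API above (the default cache is BoltDB-backed when a cache directory is usable, with the in-memory implementation as fallback):
```
package main

import (
	"fmt"

	"github.com/containers/image/pkg/blobinfocache"
	"github.com/containers/image/types"
)

func main() {
	// An empty SystemContext selects the standard per-user or system-wide
	// cache location, as computed by blobInfoCacheDir above.
	cache := blobinfocache.DefaultCache(&types.SystemContext{})
	fmt.Printf("using cache implementation %T\n", cache)
}
```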
@@ -1,4 +1,6 @@
package blobinfocache
// Package prioritize provides utilities for prioritizing locations in
// types.BlobInfoCache.CandidateLocations.
package prioritize

import (
	"sort"
@@ -13,16 +15,16 @@ import (
// This is a heuristic/guess, and could well use a different value.
const replacementAttempts = 5

// candidateWithTime is the input to types.BICReplacementCandidate prioritization.
type candidateWithTime struct {
	candidate types.BICReplacementCandidate // The replacement candidate
	lastSeen  time.Time                     // Time the candidate was last known to exist (either read or written)
// CandidateWithTime is the input to types.BICReplacementCandidate prioritization.
type CandidateWithTime struct {
	Candidate types.BICReplacementCandidate // The replacement candidate
	LastSeen  time.Time                     // Time the candidate was last known to exist (either read or written)
}

// candidateSortState is a local state implementing sort.Interface on candidates to prioritize,
// along with the specially-treated digest values for the implementation of sort.Interface.Less
type candidateSortState struct {
	cs []candidateWithTime // The entries to sort
	cs []CandidateWithTime // The entries to sort
	primaryDigest      digest.Digest // The digest the user actually asked for
	uncompressedDigest digest.Digest // The uncompressed digest corresponding to primaryDigest. May be "", or even equal to primaryDigest
}
@@ -40,35 +42,35 @@ func (css *candidateSortState) Less(i, j int) bool {
	// Other digest values are primarily sorted by time (more recent first), secondarily by digest (to provide a deterministic order)

	// First, deal with the primaryDigest/uncompressedDigest cases:
	if xi.candidate.Digest != xj.candidate.Digest {
	if xi.Candidate.Digest != xj.Candidate.Digest {
		// - The two digests are different, and one (or both) of the digests is primaryDigest or uncompressedDigest: time does not matter
		if xi.candidate.Digest == css.primaryDigest {
		if xi.Candidate.Digest == css.primaryDigest {
			return true
		}
		if xj.candidate.Digest == css.primaryDigest {
		if xj.Candidate.Digest == css.primaryDigest {
			return false
		}
		if css.uncompressedDigest != "" {
			if xi.candidate.Digest == css.uncompressedDigest {
			if xi.Candidate.Digest == css.uncompressedDigest {
				return false
			}
			if xj.candidate.Digest == css.uncompressedDigest {
			if xj.Candidate.Digest == css.uncompressedDigest {
				return true
			}
		}
	} else { // xi.candidate.Digest == xj.candidate.Digest
	} else { // xi.Candidate.Digest == xj.Candidate.Digest
		// The two digests are the same, and are either primaryDigest or uncompressedDigest: order by time
		if xi.candidate.Digest == css.primaryDigest || (css.uncompressedDigest != "" && xi.candidate.Digest == css.uncompressedDigest) {
			return xi.lastSeen.After(xj.lastSeen)
		if xi.Candidate.Digest == css.primaryDigest || (css.uncompressedDigest != "" && xi.Candidate.Digest == css.uncompressedDigest) {
			return xi.LastSeen.After(xj.LastSeen)
		}
	}

	// Neither of the digests are primaryDigest/uncompressedDigest:
	if !xi.lastSeen.Equal(xj.lastSeen) { // Order primarily by time
		return xi.lastSeen.After(xj.lastSeen)
	if !xi.LastSeen.Equal(xj.LastSeen) { // Order primarily by time
		return xi.LastSeen.After(xj.LastSeen)
	}
	// Fall back to digest, if timestamps end up _exactly_ the same (how?!)
	return xi.candidate.Digest < xj.candidate.Digest
	return xi.Candidate.Digest < xj.Candidate.Digest
}

func (css *candidateSortState) Swap(i, j int) {
@@ -77,7 +79,7 @@ func (css *candidateSortState) Swap(i, j int) {

// destructivelyPrioritizeReplacementCandidatesWithMax is destructivelyPrioritizeReplacementCandidates with a parameter for the
// number of entries to limit, only to make testing simpler.
func destructivelyPrioritizeReplacementCandidatesWithMax(cs []candidateWithTime, primaryDigest, uncompressedDigest digest.Digest, maxCandidates int) []types.BICReplacementCandidate {
func destructivelyPrioritizeReplacementCandidatesWithMax(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest, maxCandidates int) []types.BICReplacementCandidate {
	// We don't need to use sort.Stable() because nanosecond timestamps are (presumably?) unique, so no two elements should
	// compare equal.
	sort.Sort(&candidateSortState{
@@ -92,17 +94,17 @@ func destructivelyPrioritizeReplacementCandidatesWithMax(cs []candidateWithTime,
	}
	res := make([]types.BICReplacementCandidate, resLength)
	for i := range res {
		res[i] = cs[i].candidate
		res[i] = cs[i].Candidate
	}
	return res
}

// destructivelyPrioritizeReplacementCandidates consumes AND DESTROYS an array of possible replacement candidates with their last known existence times,
// DestructivelyPrioritizeReplacementCandidates consumes AND DESTROYS an array of possible replacement candidates with their last known existence times,
// the primary digest the user actually asked for, and the corresponding uncompressed digest (if known, possibly equal to the primary digest),
// and returns an appropriately prioritized and/or trimmed result suitable for a return value from types.BlobInfoCache.CandidateLocations.
//
// WARNING: The array of candidates is destructively modified. (The implementation of this function could of course
// make a copy, but all CandidateLocations implementations build the slice of candidates only for the single purpose of calling this function anyway.)
func destructivelyPrioritizeReplacementCandidates(cs []candidateWithTime, primaryDigest, uncompressedDigest digest.Digest) []types.BICReplacementCandidate {
func DestructivelyPrioritizeReplacementCandidates(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest) []types.BICReplacementCandidate {
	return destructivelyPrioritizeReplacementCandidatesWithMax(cs, primaryDigest, uncompressedDigest, replacementAttempts)
}
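To make the ordering rules concrete, here is a sketch of the resulting priority, written as a test because the package is internal to containers/image (digests are stand-ins generated from strings):
```
package prioritize

import (
	"testing"
	"time"

	"github.com/containers/image/types"
	digest "github.com/opencontainers/go-digest"
)

func TestOrderingSketch(t *testing.T) {
	primary := digest.FromString("primary")           // the digest the user asked for
	uncompressed := digest.FromString("uncompressed") // its uncompressed variant
	other := digest.FromString("other")               // an unrelated substitute

	now := time.Now()
	cs := []CandidateWithTime{
		{Candidate: types.BICReplacementCandidate{Digest: uncompressed}, LastSeen: now},
		{Candidate: types.BICReplacementCandidate{Digest: other}, LastSeen: now.Add(-time.Hour)},
		{Candidate: types.BICReplacementCandidate{Digest: primary}, LastSeen: now.Add(-2 * time.Hour)},
	}
	// Expected order: primary first (its timestamp is ignored), then "other"
	// by recency, then the uncompressed digest last among the special values.
	for _, c := range DestructivelyPrioritizeReplacementCandidates(cs, primary, uncompressed) {
		t.Log(c.Digest)
	}
}
```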
@@ -1,11 +1,13 @@
package blobinfocache
// Package memory implements an in-memory BlobInfoCache.
package memory

import (
	"sync"
	"time"

	"github.com/containers/image/pkg/blobinfocache/internal/prioritize"
	"github.com/containers/image/types"
	"github.com/opencontainers/go-digest"
	digest "github.com/opencontainers/go-digest"
	"github.com/sirupsen/logrus"
)

@@ -16,21 +18,25 @@ type locationKey struct {
	blobDigest digest.Digest
}

// memoryCache implements an in-memory-only BlobInfoCache
type memoryCache struct {
	mutex *sync.Mutex // synchronizes concurrent accesses
// cache implements an in-memory-only BlobInfoCache
type cache struct {
	mutex sync.Mutex
	// The following fields can only be accessed with mutex held.
	uncompressedDigests   map[digest.Digest]digest.Digest
	digestsByUncompressed map[digest.Digest]map[digest.Digest]struct{}             // stores a set of digests for each uncompressed digest
	knownLocations        map[locationKey]map[types.BICLocationReference]time.Time // stores last known existence time for each location reference
}

// NewMemoryCache returns a BlobInfoCache implementation which is in-memory only.
// This is primarily intended for tests, but also used as a fallback if DefaultCache
// can’t determine, or set up, the location for a persistent cache.
// Manual users of types.{ImageSource,ImageDestination} might also use this instead of a persistent cache.
func NewMemoryCache() types.BlobInfoCache {
	return &memoryCache{
		mutex: new(sync.Mutex),
// New returns a BlobInfoCache implementation which is in-memory only.
//
// This is primarily intended for tests, but also used as a fallback
// if blobinfocache.DefaultCache can’t determine, or set up, the
// location for a persistent cache. Most users should use
// blobinfocache.DefaultCache. instead of calling this directly.
// Manual users of types.{ImageSource,ImageDestination} might also use
// this instead of a persistent cache.
func New() types.BlobInfoCache {
	return &cache{
		uncompressedDigests:   map[digest.Digest]digest.Digest{},
		digestsByUncompressed: map[digest.Digest]map[digest.Digest]struct{}{},
		knownLocations:        map[locationKey]map[types.BICLocationReference]time.Time{},
@@ -40,16 +46,14 @@ func NewMemoryCache() types.BlobInfoCache {
// UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
// May return anyDigest if it is known to be uncompressed.
// Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
func (mem *memoryCache) UncompressedDigest(anyDigest digest.Digest) digest.Digest {
func (mem *cache) UncompressedDigest(anyDigest digest.Digest) digest.Digest {
	mem.mutex.Lock()
	defer mem.mutex.Unlock()
	return mem.uncompressedDigest(anyDigest)
	return mem.uncompressedDigestLocked(anyDigest)
}

// uncompressedDigest returns an uncompressed digest corresponding to anyDigest.
// May return anyDigest if it is known to be uncompressed.
// Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
func (mem *memoryCache) uncompressedDigest(anyDigest digest.Digest) digest.Digest {
// uncompressedDigestLocked implements types.BlobInfoCache.UncompressedDigest, but must be called only with mem.mutex held.
func (mem *cache) uncompressedDigestLocked(anyDigest digest.Digest) digest.Digest {
	if d, ok := mem.uncompressedDigests[anyDigest]; ok {
		return d
	}
@@ -67,7 +71,7 @@ func (mem *memoryCache) uncompressedDigest(anyDigest digest.Diges
// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
func (mem *memoryCache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) {
func (mem *cache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) {
	mem.mutex.Lock()
	defer mem.mutex.Unlock()
	if previous, ok := mem.uncompressedDigests[anyDigest]; ok && previous != uncompressed {
@@ -85,7 +89,7 @@ func (mem *memoryCache) RecordDigestUncompressedPair(anyDigest digest.Digest, un

// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
// and can be reused given the opaque location data.
func (mem *memoryCache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) {
func (mem *cache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) {
	mem.mutex.Lock()
	defer mem.mutex.Unlock()
	key := locationKey{transport: transport.Name(), scope: scope, blobDigest: blobDigest}
@@ -97,16 +101,16 @@ func (mem *memoryCache) RecordKnownLocation(transport types.ImageTransport, scop
	locationScope[location] = time.Now() // Possibly overwriting an older entry.
}

// appendReplacementCandiates creates candidateWithTime values for (transport, scope, digest), and returns the result of appending them to candidates.
func (mem *memoryCache) appendReplacementCandidates(candidates []candidateWithTime, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest) []candidateWithTime {
// appendReplacementCandiates creates prioritize.CandidateWithTime values for (transport, scope, digest), and returns the result of appending them to candidates.
func (mem *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest) []prioritize.CandidateWithTime {
	locations := mem.knownLocations[locationKey{transport: transport.Name(), scope: scope, blobDigest: digest}] // nil if not present
	for l, t := range locations {
		candidates = append(candidates, candidateWithTime{
			candidate: types.BICReplacementCandidate{
		candidates = append(candidates, prioritize.CandidateWithTime{
			Candidate: types.BICReplacementCandidate{
				Digest:   digest,
				Location: l,
			},
			lastSeen: t,
			LastSeen: t,
		})
	}
	return candidates
@@ -118,14 +122,14 @@ func (mem *memoryCache) appendReplacementCandidates(candidates []candidateWithTi
// If !canSubstitute, the returned cadidates will match the submitted digest exactly; if canSubstitute,
// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
// uncompressed digest.
func (mem *memoryCache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
func (mem *cache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
	mem.mutex.Lock()
	defer mem.mutex.Unlock()
	res := []candidateWithTime{}
	res := []prioritize.CandidateWithTime{}
	res = mem.appendReplacementCandidates(res, transport, scope, primaryDigest)
	var uncompressedDigest digest.Digest // = ""
	if canSubstitute {
		if uncompressedDigest = mem.uncompressedDigest(primaryDigest); uncompressedDigest != "" {
		if uncompressedDigest = mem.uncompressedDigestLocked(primaryDigest); uncompressedDigest != "" {
			otherDigests := mem.digestsByUncompressed[uncompressedDigest] // nil if not present in the map
			for d := range otherDigests {
				if d != primaryDigest && d != uncompressedDigest {
@@ -137,5 +141,5 @@ func (mem *memoryCache) CandidateLocations(transport types.ImageTransport, scope
			}
		}
	}
	return destructivelyPrioritizeReplacementCandidates(res, primaryDigest, uncompressedDigest)
	return prioritize.DestructivelyPrioritizeReplacementCandidates(res, primaryDigest, uncompressedDigest)
}
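A short round-trip sketch of the memory cache (digests are stand-ins generated from strings):
```
package main

import (
	"fmt"

	"github.com/containers/image/pkg/blobinfocache/memory"
	digest "github.com/opencontainers/go-digest"
)

func main() {
	cache := memory.New()
	compressed := digest.FromString("gzipped layer")
	uncompressed := digest.FromString("raw layer")

	// Per the warnings above, only record pairs that were verified locally.
	cache.RecordDigestUncompressedPair(compressed, uncompressed)
	fmt.Println(cache.UncompressedDigest(compressed)) // prints the recorded uncompressed digest
}
```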
@@ -1,4 +1,5 @@
package blobinfocache
// Package none implements a dummy BlobInfoCache which records no data.
package none

import (
	"github.com/containers/image/types"
@@ -11,9 +12,10 @@ type noCache struct {

// NoCache implements BlobInfoCache by not recording any data.
//
// This exists primarily for implementations of configGetter for Manifest.Inspect,
// because configs only have one representation.
// Any use of BlobInfoCache with blobs should usually use at least a short-lived cache.
// This exists primarily for implementations of configGetter for
// Manifest.Inspect, because configs only have one representation.
// Any use of BlobInfoCache with blobs should usually use at least a
// short-lived cache, ideally blobinfocache.DefaultCache.
var NoCache types.BlobInfoCache = noCache{}

// UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
15 vendor/github.com/containers/image/pkg/docker/config/config.go generated vendored
@@ -85,21 +85,6 @@ func GetAuthentication(sys *types.SystemContext, registry string) (string, strin
	return "", "", nil
}

// GetUserLoggedIn returns the username logged in to registry from either
// auth.json or XDG_RUNTIME_DIR
// Used to tell the user if someone is logged in to the registry when logging in
func GetUserLoggedIn(sys *types.SystemContext, registry string) (string, error) {
	path, err := getPathToAuth(sys)
	if err != nil {
		return "", err
	}
	username, _, _ := findAuthentication(registry, path, false)
	if username != "" {
		return username, nil
	}
	return "", nil
}

// RemoveAuthentication deletes the credentials stored in auth.json
func RemoveAuthentication(sys *types.SystemContext, registry string) error {
	return modifyJSON(sys, func(auths *dockerConfigFile) (bool, error) {
92 vendor/github.com/containers/image/pkg/sysregistriesv2/system_registries_v2.go generated vendored
@@ -10,6 +10,10 @@ import (
|
||||
|
||||
"github.com/BurntSushi/toml"
|
||||
"github.com/containers/image/types"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/containers/image/docker/reference"
|
||||
)
|
||||
|
||||
// systemRegistriesConfPath is the path to the system-wide registry
|
||||
@@ -22,34 +26,48 @@ var systemRegistriesConfPath = builtinRegistriesConfPath
|
||||
// DO NOT change this, instead see systemRegistriesConfPath above.
|
||||
const builtinRegistriesConfPath = "/etc/containers/registries.conf"
|
||||
|
||||
// Mirror represents a mirror. Mirrors can be used as pull-through caches for
|
||||
// registries.
|
||||
type Mirror struct {
|
||||
// The mirror's URL.
|
||||
URL string `toml:"url"`
|
||||
// Endpoint describes a remote location of a registry.
|
||||
type Endpoint struct {
|
||||
// The endpoint's remote location.
|
||||
Location string `toml:"location"`
|
||||
// If true, certs verification will be skipped and HTTP (non-TLS)
|
||||
// connections will be allowed.
|
||||
Insecure bool `toml:"insecure"`
|
||||
}
|
||||
|
||||
// RewriteReference will substitute the provided reference `prefix` to the
|
||||
// endpoints `location` from the `ref` and creates a new named reference from it.
|
||||
// The function errors if the newly created reference is not parsable.
|
||||
func (e *Endpoint) RewriteReference(ref reference.Named, prefix string) (reference.Named, error) {
|
||||
refString := ref.String()
|
||||
if !refMatchesPrefix(refString, prefix) {
|
||||
return nil, fmt.Errorf("invalid prefix '%v' for reference '%v'", prefix, refString)
|
||||
}
|
||||
|
||||
newNamedRef := strings.Replace(refString, prefix, e.Location, 1)
|
||||
newParsedRef, err := reference.ParseNamed(newNamedRef)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error rewriting reference")
|
||||
}
|
||||
logrus.Debugf("reference rewritten from '%v' to '%v'", refString, newParsedRef.String())
|
||||
return newParsedRef, nil
|
||||
}
|
||||
|
||||
// Registry represents a registry.
|
||||
type Registry struct {
|
||||
// Serializable registry URL.
|
||||
URL string `toml:"url"`
|
||||
// A registry is an Endpoint too
|
||||
Endpoint
|
||||
// The registry's mirrors.
|
||||
Mirrors []Mirror `toml:"mirror"`
|
||||
Mirrors []Endpoint `toml:"mirror"`
|
||||
// If true, pulling from the registry will be blocked.
|
||||
Blocked bool `toml:"blocked"`
|
||||
// If true, certs verification will be skipped and HTTP (non-TLS)
|
||||
// connections will be allowed.
|
||||
Insecure bool `toml:"insecure"`
|
||||
// If true, the registry can be used when pulling an unqualified image.
|
||||
Search bool `toml:"unqualified-search"`
|
||||
// Prefix is used for matching images, and to translate one namespace to
|
||||
// another. If `Prefix="example.com/bar"`, `URL="example.com/foo/bar"`
|
||||
// another. If `Prefix="example.com/bar"`, `location="example.com/foo/bar"`
|
||||
// and we pull from "example.com/bar/myimage:latest", the image will
|
||||
// effectively be pulled from "example.com/foo/bar/myimage:latest".
|
||||
// If no Prefix is specified, it defaults to the specified URL.
|
||||
// If no Prefix is specified, it defaults to the specified location.
|
||||
Prefix string `toml:"prefix"`
|
||||
}
|
||||
|
||||
@@ -84,18 +102,18 @@ func (e *InvalidRegistries) Error() string {
|
||||
return e.s
|
||||
}
|
||||
|
||||
// parseURL parses the input string, performs some sanity checks and returns
|
||||
// parseLocation parses the input string, performs some sanity checks and returns
|
||||
// the sanitized input string. An error is returned if the input string is
|
||||
// empty or if contains an "http{s,}://" prefix.
|
||||
func parseURL(input string) (string, error) {
|
||||
func parseLocation(input string) (string, error) {
|
||||
trimmed := strings.TrimRight(input, "/")
|
||||
|
||||
if trimmed == "" {
|
||||
return "", &InvalidRegistries{s: "invalid URL: cannot be empty"}
|
||||
return "", &InvalidRegistries{s: "invalid location: cannot be empty"}
|
||||
}
|
||||
|
||||
if strings.HasPrefix(trimmed, "http://") || strings.HasPrefix(trimmed, "https://") {
|
||||
msg := fmt.Sprintf("invalid URL '%s': URI schemes are not supported", input)
|
||||
msg := fmt.Sprintf("invalid location '%s': URI schemes are not supported", input)
|
||||
return "", &InvalidRegistries{s: msg}
|
||||
}
|
||||
|
||||
@@ -111,21 +129,21 @@ func getV1Registries(config *tomlConfig) ([]Registry, error) {
|
||||
// to minimize behavior inconsistency and not contribute to difficult-to-reproduce situations.
|
||||
registryOrder := []string{}
|
||||
|
||||
getRegistry := func(url string) (*Registry, error) { // Note: _pointer_ to a long-lived object
|
||||
getRegistry := func(location string) (*Registry, error) { // Note: _pointer_ to a long-lived object
|
||||
var err error
|
||||
url, err = parseURL(url)
|
||||
location, err = parseLocation(location)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
reg, exists := regMap[url]
|
||||
reg, exists := regMap[location]
|
||||
if !exists {
|
||||
reg = &Registry{
|
||||
URL: url,
|
||||
Mirrors: []Mirror{},
|
||||
Prefix: url,
|
||||
Endpoint: Endpoint{Location: location},
|
||||
Mirrors: []Endpoint{},
|
||||
Prefix: location,
|
||||
}
|
||||
regMap[url] = reg
|
||||
registryOrder = append(registryOrder, url)
|
||||
regMap[location] = reg
|
||||
registryOrder = append(registryOrder, location)
|
||||
}
|
||||
return reg, nil
|
||||
}
|
||||
@@ -155,15 +173,15 @@ func getV1Registries(config *tomlConfig) ([]Registry, error) {
|
||||
}
|
||||
|
||||
registries := []Registry{}
|
||||
for _, url := range registryOrder {
|
||||
reg := regMap[url]
|
||||
for _, location := range registryOrder {
|
||||
reg := regMap[location]
|
||||
registries = append(registries, *reg)
|
||||
}
|
||||
return registries, nil
|
||||
}
|
||||
|
||||
// postProcessRegistries checks the consistency of all registries (e.g., set
|
||||
// the Prefix to URL if not set) and applies conflict checks. It returns an
|
||||
// the Prefix to Location if not set) and applies conflict checks. It returns an
|
||||
// array of cleaned registries and error in case of conflicts.
|
||||
func postProcessRegistries(regs []Registry) ([]Registry, error) {
|
||||
var registries []Registry
|
||||
@@ -172,16 +190,16 @@ func postProcessRegistries(regs []Registry) ([]Registry, error) {
 	for _, reg := range regs {
 		var err error
 
-		// make sure URL and Prefix are valid
-		reg.URL, err = parseURL(reg.URL)
+		// make sure Location and Prefix are valid
+		reg.Location, err = parseLocation(reg.Location)
 		if err != nil {
 			return nil, err
 		}
 
 		if reg.Prefix == "" {
-			reg.Prefix = reg.URL
+			reg.Prefix = reg.Location
 		} else {
-			reg.Prefix, err = parseURL(reg.Prefix)
+			reg.Prefix, err = parseLocation(reg.Prefix)
 			if err != nil {
 				return nil, err
 			}
@@ -189,13 +207,13 @@ func postProcessRegistries(regs []Registry) ([]Registry, error) {
 
 		// make sure mirrors are valid
 		for _, mir := range reg.Mirrors {
-			mir.URL, err = parseURL(mir.URL)
+			mir.Location, err = parseLocation(mir.Location)
 			if err != nil {
 				return nil, err
 			}
 		}
 		registries = append(registries, reg)
-		regMap[reg.URL] = append(regMap[reg.URL], reg)
+		regMap[reg.Location] = append(regMap[reg.Location], reg)
 	}
 
 	// Given a registry can be mentioned multiple times (e.g., to have
@@ -205,15 +223,15 @@ func postProcessRegistries(regs []Registry) ([]Registry, error) {
 	// Note: we need to iterate over the registries array to ensure a
 	// deterministic behavior which is not guaranteed by maps.
 	for _, reg := range registries {
-		others, _ := regMap[reg.URL]
+		others, _ := regMap[reg.Location]
 		for _, other := range others {
 			if reg.Insecure != other.Insecure {
-				msg := fmt.Sprintf("registry '%s' is defined multiple times with conflicting 'insecure' setting", reg.URL)
+				msg := fmt.Sprintf("registry '%s' is defined multiple times with conflicting 'insecure' setting", reg.Location)
 				return nil, &InvalidRegistries{s: msg}
 			}
 			if reg.Blocked != other.Blocked {
-				msg := fmt.Sprintf("registry '%s' is defined multiple times with conflicting 'blocked' setting", reg.URL)
+				msg := fmt.Sprintf("registry '%s' is defined multiple times with conflicting 'blocked' setting", reg.Location)
 				return nil, &InvalidRegistries{s: msg}
 			}
 		}
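The rename from parseURL to parseLocation reflects that a registry entry is a bare "host[:port][/namespace]" location, never a URL with a scheme. A minimal standalone sketch of the same validation rules (trailing slashes trimmed, empty input and "http{s}://" prefixes rejected); the function name here is illustrative, not the vendored API:

```go
package main

import (
	"fmt"
	"strings"
)

// validateLocation mirrors the checks parseLocation performs in the diff above.
func validateLocation(input string) (string, error) {
	trimmed := strings.TrimRight(input, "/")
	if trimmed == "" {
		return "", fmt.Errorf("invalid location: cannot be empty")
	}
	if strings.HasPrefix(trimmed, "http://") || strings.HasPrefix(trimmed, "https://") {
		return "", fmt.Errorf("invalid location '%s': URI schemes are not supported", input)
	}
	return trimmed, nil
}

func main() {
	for _, in := range []string{"example.com/", "https://example.com", ""} {
		loc, err := validateLocation(in)
		fmt.Printf("%q -> %q, %v\n", in, loc, err)
	}
}
```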
65 vendor/github.com/containers/image/registries.conf generated vendored Normal file
@@ -0,0 +1,65 @@
+# For more information on this configuration file, see containers-registries.conf(5).
+#
+# There are multiple versions of the configuration syntax available, where the
+# second iteration is backwards compatible to the first one. Mixing up both
+# formats will result in an runtime error.
+#
+# The initial configuration format looks like this:
+#
+# Registries to search for images that are not fully-qualified.
+# i.e. foobar.com/my_image:latest vs my_image:latest
+[registries.search]
+registries = []
+
+# Registries that do not use TLS when pulling images or uses self-signed
+# certificates.
+[registries.insecure]
+registries = []
+
+# Blocked Registries, blocks the `docker daemon` from pulling from the blocked registry. If you specify
+# "*", then the docker daemon will only be allowed to pull from registries listed above in the search
+# registries. Blocked Registries is deprecated because other container runtimes and tools will not use it.
+# It is recommended that you use the trust policy file /etc/containers/policy.json to control which
+# registries you want to allow users to pull and push from. policy.json gives greater flexibility, and
+# supports all container runtimes and tools including the docker daemon, cri-o, buildah ...
+# The atomic CLI `atomic trust` can be used to easily configure the policy.json file.
+[registries.block]
+registries = []
+
+# The second version of the configuration format allows to specify registry
+# mirrors:
+#
+# [[registry]]
+# # The main location of the registry
+# location = "example.com"
+#
+# # If true, certs verification will be skipped and HTTP (non-TLS) connections
+# # will be allowed.
+# insecure = false
+#
+# # Prefix is used for matching images, and to translate one namespace to
+# # another. If `prefix = "example.com/foo"`, `location = "example.com"` and
+# # we pull from "example.com/foo/myimage:latest", the image will effectively be
+# # pulled from "example.com/myimage:latest". If no Prefix is specified,
+# # it defaults to the specified `location`. When a prefix is used, then a pull
+# # without specifying the prefix is not possible any more.
+# prefix = "example.com/foo"
+#
+# # If true, the registry can be used when pulling an unqualified image. If a
+# # prefix is specified, unqualified pull is not possible any more.
+# unqualified-search = false
+#
+# # If true, pulling from the registry will be blocked.
+# blocked = false
+#
+# # All available mirrors of the registry. The mirrors will be evaluated in
+# # order during an image pull. Furthermore it is possible to specify the
+# # `insecure` flag per registry mirror, too.
+# mirror = [
+#   { location = "example-mirror-0.local", insecure = false },
+#   { location = "example-mirror-1.local", insecure = true },
+#   # It is also possible to specify an additional path within the `location`.
+#   # A pull to `example.com/foo/image:latest` will then result in
+#   # `example-mirror-2.local/path/image:latest`.
+#   { location = "example-mirror-2.local/path" },
+# ]
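A sketch of decoding the new `[[registry]]` format with the already-vendored BurntSushi/toml package. The struct shapes follow the Registry/Endpoint fields visible in the diff above (the vendored type embeds Endpoint; it is flattened here for brevity, and exact field sets may differ):

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

// Endpoint matches the per-mirror shape from the diff above.
type Endpoint struct {
	Location string `toml:"location"`
	Insecure bool   `toml:"insecure"`
}

// Registry flattens Endpoint plus the prefix/blocked/mirror fields.
type Registry struct {
	Location string     `toml:"location"`
	Insecure bool       `toml:"insecure"`
	Prefix   string     `toml:"prefix"`
	Blocked  bool       `toml:"blocked"`
	Mirrors  []Endpoint `toml:"mirror"`
}

type config struct {
	Registries []Registry `toml:"registry"`
}

func main() {
	data := `
[[registry]]
location = "example.com"
prefix = "example.com/foo"
mirror = [
  { location = "example-mirror-0.local", insecure = false },
  { location = "example-mirror-2.local/path" },
]
`
	var cfg config
	if _, err := toml.Decode(data, &cfg); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", cfg.Registries[0])
}
```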
2 vendor/github.com/containers/image/signature/policy_config.go generated vendored
@@ -30,7 +30,7 @@ import (
 // -ldflags '-X github.com/containers/image/signature.systemDefaultPolicyPath=$your_path'
 var systemDefaultPolicyPath = builtinDefaultPolicyPath
 
-// builtinDefaultPolicyPath is the policy pat used for DefaultPolicy().
+// builtinDefaultPolicyPath is the policy path used for DefaultPolicy().
 // DO NOT change this, instead see systemDefaultPolicyPath above.
 const builtinDefaultPolicyPath = "/etc/containers/policy.json"
 
14 vendor/github.com/containers/image/storage/storage_image.go generated vendored
@@ -18,7 +18,7 @@ import (
 	"github.com/containers/image/image"
 	"github.com/containers/image/internal/tmpdir"
 	"github.com/containers/image/manifest"
-	"github.com/containers/image/pkg/blobinfocache"
+	"github.com/containers/image/pkg/blobinfocache/none"
 	"github.com/containers/image/types"
 	"github.com/containers/storage"
 	"github.com/containers/storage/pkg/archive"
@@ -595,12 +595,12 @@ func (s *storageImageDestination) Commit(ctx context.Context) error {
 	if !haveDiffID {
 		// Check if it's elsewhere and the caller just forgot to pass it to us in a PutBlob(),
 		// or to even check if we had it.
-		// Use blobinfocache.NoCache to avoid a repeated DiffID lookup in the BlobInfoCache; a caller
+		// Use none.NoCache to avoid a repeated DiffID lookup in the BlobInfoCache; a caller
 		// that relies on using a blob digest that has never been seeen by the store had better call
 		// TryReusingBlob; not calling PutBlob already violates the documented API, so there’s only
 		// so far we are going to accommodate that (if we should be doing that at all).
 		logrus.Debugf("looking for diffID for blob %+v", blob.Digest)
-		has, _, err := s.TryReusingBlob(ctx, blob.BlobInfo, blobinfocache.NoCache, false)
+		has, _, err := s.TryReusingBlob(ctx, blob.BlobInfo, none.NoCache, false)
 		if err != nil {
 			return errors.Wrapf(err, "error checking for a layer based on blob %q", blob.Digest.String())
 		}
@@ -732,7 +732,7 @@ func (s *storageImageDestination) Commit(ctx context.Context) error {
 	if err != nil {
 		return errors.Wrapf(err, "error copying non-layer blob %q to image", blob)
 	}
-	if err := s.imageRef.transport.store.SetImageBigData(img.ID, blob.String(), v); err != nil {
+	if err := s.imageRef.transport.store.SetImageBigData(img.ID, blob.String(), v, manifest.Digest); err != nil {
 		if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil {
 			logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2)
 		}
@@ -765,14 +765,14 @@ func (s *storageImageDestination) Commit(ctx context.Context) error {
 	if err != nil {
 		return errors.Wrapf(err, "error computing manifest digest")
 	}
-	if err := s.imageRef.transport.store.SetImageBigData(img.ID, manifestBigDataKey(manifestDigest), s.manifest); err != nil {
+	if err := s.imageRef.transport.store.SetImageBigData(img.ID, manifestBigDataKey(manifestDigest), s.manifest, manifest.Digest); err != nil {
 		if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil {
 			logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2)
 		}
 		logrus.Debugf("error saving manifest for image %q: %v", img.ID, err)
 		return err
 	}
-	if err := s.imageRef.transport.store.SetImageBigData(img.ID, storage.ImageDigestBigDataKey, s.manifest); err != nil {
+	if err := s.imageRef.transport.store.SetImageBigData(img.ID, storage.ImageDigestBigDataKey, s.manifest, manifest.Digest); err != nil {
 		if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil {
 			logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2)
 		}
@@ -781,7 +781,7 @@ func (s *storageImageDestination) Commit(ctx context.Context) error {
 	}
 	// Save the signatures, if we have any.
 	if len(s.signatures) > 0 {
-		if err := s.imageRef.transport.store.SetImageBigData(img.ID, "signatures", s.signatures); err != nil {
+		if err := s.imageRef.transport.store.SetImageBigData(img.ID, "signatures", s.signatures, manifest.Digest); err != nil {
 			if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil {
 				logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2)
 			}
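Every SetImageBigData call above now threads a digestManifest callback; the callers pass manifest.Digest so containers/storage can digest manifests without importing containers/image. A minimal sketch of the new call shape, using the key and callback seen in this diff:

```go
package example

import (
	"github.com/containers/image/manifest"
	"github.com/containers/storage"
)

// saveManifest shows the new signature: the digestManifest callback
// (here manifest.Digest) lets the storage layer compute a canonical
// manifest digest while keeping the dependency direction image -> storage.
func saveManifest(store storage.Store, imageID string, manifestBytes []byte) error {
	return store.SetImageBigData(imageID, storage.ImageDigestBigDataKey, manifestBytes, manifest.Digest)
}
```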
5 vendor/github.com/containers/image/storage/storage_transport.go generated vendored
@@ -180,7 +180,10 @@ func (s *storageTransport) GetStore() (storage.Store, error) {
 	// Return the transport's previously-set store. If we don't have one
 	// of those, initialize one now.
 	if s.store == nil {
-		options := storage.DefaultStoreOptions
+		options, err := storage.DefaultStoreOptionsAutoDetectUID()
+		if err != nil {
+			return nil, err
+		}
 		options.UIDMap = s.defaultUIDMap
 		options.GIDMap = s.defaultGIDMap
 		store, err := storage.GetStore(options)
12 vendor/github.com/containers/image/transports/alltransports/alltransports.go generated vendored
@@ -22,6 +22,7 @@ import (
 
 // ParseImageName converts a URL-like image name to a types.ImageReference.
 func ParseImageName(imgName string) (types.ImageReference, error) {
+	// Keep this in sync with TransportFromImageName!
 	parts := strings.SplitN(imgName, ":", 2)
 	if len(parts) != 2 {
 		return nil, errors.Errorf(`Invalid image name "%s", expected colon-separated transport:reference`, imgName)
@@ -32,3 +33,14 @@ func ParseImageName(imgName string) (types.ImageReference, error) {
 	}
 	return transport.ParseReference(parts[1])
 }
+
+// TransportFromImageName converts an URL-like name to a types.ImageTransport or nil when
+// the transport is unknown or when the input is invalid.
+func TransportFromImageName(imageName string) types.ImageTransport {
+	// Keep this in sync with ParseImageName!
+	parts := strings.SplitN(imageName, ":", 2)
+	if len(parts) == 2 {
+		return transports.Get(parts[0])
+	}
+	return nil
+}
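TransportFromImageName lets callers cheaply inspect just the transport prefix without fully parsing the reference. A small usage sketch (the busybox reference string is only an example):

```go
package main

import (
	"fmt"

	"github.com/containers/image/transports/alltransports"
)

func main() {
	name := "docker://docker.io/library/busybox:latest"

	// Just the transport; nil for unknown transports or invalid input.
	if t := alltransports.TransportFromImageName(name); t != nil {
		fmt.Println("transport:", t.Name()) // "docker"
	}

	// Full parse into an ImageReference.
	ref, err := alltransports.ParseImageName(name)
	if err != nil {
		panic(err)
	}
	fmt.Println("reference:", ref.StringWithinTransport())
}
```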
1 vendor/github.com/containers/image/types/types.go generated vendored
@@ -401,6 +401,7 @@ type ImageInspectInfo struct {
 }
 
 // DockerAuthConfig contains authorization information for connecting to a registry.
+// the value of Username and Password can be empty for accessing the registry anonymously
 type DockerAuthConfig struct {
 	Username string
 	Password string
5 vendor/github.com/containers/image/vendor.conf generated vendored
@@ -1,7 +1,7 @@
 github.com/containers/image
 
 github.com/sirupsen/logrus v1.0.0
-github.com/containers/storage master
+github.com/containers/storage v1.12.2
 github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76
 github.com/docker/docker-credential-helpers d68f9aeca33f5fd3f08eeae5e9d175edf4e731d1
 github.com/docker/distribution 5f6282db7d65e6d72ad7c2cc66310724a57be716
@@ -13,7 +13,6 @@ github.com/containerd/continuity d8fb8589b0e8e85b8c8bbaa8840226d0dfeb7371
 github.com/ghodss/yaml 04f313413ffd65ce25f2541bfd2b2ceec5c0908c
 github.com/gorilla/mux 94e7d24fd285520f3d12ae998f7fdd6b5393d453
 github.com/imdario/mergo 50d4dbd4eb0e84778abe37cefef140271d96fade
-github.com/mattn/go-runewidth 14207d285c6c197daabb5c9793d63e7af9ab2d50
 github.com/mistifyio/go-zfs c0224de804d438efd11ea6e52ada8014537d6062
 github.com/mtrmac/gpgme b2432428689ca58c2b8e8dea9449d3295cf96fc9
 github.com/opencontainers/go-digest c9281466c8b2f606084ac71339773efd177436e7
@@ -43,7 +42,7 @@ github.com/syndtr/gocapability master
 github.com/Microsoft/go-winio ab35fc04b6365e8fcb18e6e9e41ea4a02b10b175
 github.com/Microsoft/hcsshim eca7177590cdcbd25bbc5df27e3b693a54b53a6a
 github.com/ulikunitz/xz v0.5.4
-github.com/boltdb/bolt master
+github.com/etcd-io/bbolt v1.3.2
 github.com/klauspost/pgzip v1.2.1
 github.com/klauspost/compress v1.4.1
 github.com/klauspost/cpuid v1.2.0
8 vendor/github.com/containers/image/version/version.go generated vendored
@@ -4,14 +4,14 @@ import "fmt"
 
 const (
 	// VersionMajor is for an API incompatible changes
-	VersionMajor = 0
+	VersionMajor = 1
 	// VersionMinor is for functionality in a backwards-compatible manner
-	VersionMinor = 1
+	VersionMinor = 7
 	// VersionPatch is for backwards-compatible bug fixes
-	VersionPatch = 5
+	VersionPatch = 0
 
 	// VersionDev indicates development branch. Releases will be empty string.
-	VersionDev = ""
+	VersionDev = "-dev"
 )
 
 // Version is the specification version that the package types support.
10 vendor/github.com/containers/storage/containers.go generated vendored
@@ -71,7 +71,7 @@ type Container struct {
 type ContainerStore interface {
 	FileBasedStore
 	MetadataStore
-	BigDataStore
+	ContainerBigDataStore
 	FlaggableStore
 
 	// Create creates a container that has a specified ID (or generates a
@@ -456,7 +456,7 @@ func (r *containerStore) BigDataSize(id, key string) (int64, error) {
 		return size, nil
 	}
 	if data, err := r.BigData(id, key); err == nil && data != nil {
-		if r.SetBigData(id, key, data) == nil {
+		if err = r.SetBigData(id, key, data); err == nil {
 			c, ok := r.lookup(id)
 			if !ok {
 				return -1, ErrContainerUnknown
@@ -464,6 +464,8 @@ func (r *containerStore) BigDataSize(id, key string) (int64, error) {
 			if size, ok := c.BigDataSizes[key]; ok {
 				return size, nil
 			}
+		} else {
+			return -1, err
 		}
 	}
 	return -1, ErrSizeUnknown
@@ -484,7 +486,7 @@ func (r *containerStore) BigDataDigest(id, key string) (digest.Digest, error) {
 		return d, nil
 	}
 	if data, err := r.BigData(id, key); err == nil && data != nil {
-		if r.SetBigData(id, key, data) == nil {
+		if err = r.SetBigData(id, key, data); err == nil {
 			c, ok := r.lookup(id)
 			if !ok {
 				return "", ErrContainerUnknown
@@ -492,6 +494,8 @@ func (r *containerStore) BigDataDigest(id, key string) (digest.Digest, error) {
 			if d, ok := c.BigDataDigests[key]; ok {
 				return d, nil
 			}
+		} else {
+			return "", err
 		}
 	}
 	return "", ErrDigestUnknown
vendor/github.com/containers/storage/drivers/copy/copy.go generated vendored
@@ -1,4 +1,4 @@
-// +build linux
+// +build linux,cgo
 
 package copy
 
@@ -19,6 +19,7 @@ import (
 	"syscall"
 	"time"
 
+	"github.com/containers/storage/pkg/idtools"
 	"github.com/containers/storage/pkg/pools"
 	"github.com/containers/storage/pkg/system"
 	rsystem "github.com/opencontainers/runc/libcontainer/system"
@@ -152,8 +153,8 @@ func DirCopy(srcDir, dstDir string, copyMode Mode, copyXattrs bool) error {
 
 		isHardlink := false
 
-		switch f.Mode() & os.ModeType {
-		case 0: // Regular file
+		switch mode := f.Mode(); {
+		case mode.IsRegular():
 			id := fileID{dev: stat.Dev, ino: stat.Ino}
 			if copyMode == Hardlink {
 				isHardlink = true
@@ -171,12 +172,12 @@ func DirCopy(srcDir, dstDir string, copyMode Mode, copyXattrs bool) error {
 				copiedFiles[id] = dstPath
 			}
 
-		case os.ModeDir:
+		case mode.IsDir():
 			if err := os.Mkdir(dstPath, f.Mode()); err != nil && !os.IsExist(err) {
 				return err
 			}
 
-		case os.ModeSymlink:
+		case mode&os.ModeSymlink != 0:
 			link, err := os.Readlink(srcPath)
 			if err != nil {
 				return err
@@ -186,14 +187,15 @@ func DirCopy(srcDir, dstDir string, copyMode Mode, copyXattrs bool) error {
 				return err
 			}
 
-		case os.ModeNamedPipe:
+		case mode&os.ModeNamedPipe != 0:
 			fallthrough
-		case os.ModeSocket:
+
+		case mode&os.ModeSocket != 0:
 			if err := unix.Mkfifo(dstPath, stat.Mode); err != nil {
 				return err
 			}
 
-		case os.ModeDevice:
+		case mode&os.ModeDevice != 0:
 			if rsystem.RunningInUserNS() {
 				// cannot create a device if running in user namespace
 				return nil
@@ -203,7 +205,7 @@ func DirCopy(srcDir, dstDir string, copyMode Mode, copyXattrs bool) error {
 			}
 
 		default:
-			return fmt.Errorf("unknown file type for %s", srcPath)
+			return fmt.Errorf("unknown file type with mode %v for %s", mode, srcPath)
 		}
 
 		// Everything below is copying metadata from src to dst. All this metadata
@@ -212,7 +214,7 @@ func DirCopy(srcDir, dstDir string, copyMode Mode, copyXattrs bool) error {
 			return nil
 		}
 
-		if err := os.Lchown(dstPath, int(stat.Uid), int(stat.Gid)); err != nil {
+		if err := idtools.SafeLchown(dstPath, int(stat.Uid), int(stat.Gid)); err != nil {
 			return err
 		}
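The switch rewrite matters because `f.Mode() & os.ModeType` yields the full set of type bits, so a file carrying combined bits (e.g. os.ModeDevice together with os.ModeCharDevice) matches no single-constant case and likely fell through to the error branch. A standalone sketch of the predicate style the new code uses:

```go
package main

import (
	"fmt"
	"os"
)

// classify uses the same bit-test predicates as the rewritten DirCopy switch,
// instead of comparing mode&os.ModeType against a single constant.
func classify(mode os.FileMode) string {
	switch {
	case mode.IsRegular():
		return "regular file"
	case mode.IsDir():
		return "directory"
	case mode&os.ModeSymlink != 0:
		return "symlink"
	case mode&os.ModeNamedPipe != 0, mode&os.ModeSocket != 0:
		return "fifo or socket"
	case mode&os.ModeDevice != 0:
		return "device"
	default:
		return fmt.Sprintf("unknown file type with mode %v", mode)
	}
}

func main() {
	fmt.Println(classify(0644))                              // regular file
	fmt.Println(classify(os.ModeDir | 0755))                 // directory
	fmt.Println(classify(os.ModeDevice | os.ModeCharDevice)) // device
}
```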
19 vendor/github.com/containers/storage/drivers/copy/copy_unsupported.go generated vendored Normal file
@@ -0,0 +1,19 @@
+// +build !linux !cgo
+
+package copy
+
+import "github.com/containers/storage/pkg/chrootarchive"
+
+// Mode indicates whether to use hardlink or copy content
+type Mode int
+
+const (
+	// Content creates a new file, and copies the content of the file
+	Content Mode = iota
+)
+
+// DirCopy copies or hardlinks the contents of one directory to another,
+// properly handling soft links
+func DirCopy(srcDir, dstDir string, _ Mode, _ bool) error {
+	return chrootarchive.NewArchiver(nil).CopyWithTar(srcDir, dstDir)
+}
13 vendor/github.com/containers/storage/drivers/devmapper/device_setup.go generated vendored
@@ -119,10 +119,17 @@ func checkDevHasFS(dev string) error {
 }
 
 func verifyBlockDevice(dev string, force bool) error {
-	if err := checkDevAvailable(dev); err != nil {
+	realPath, err := filepath.Abs(dev)
+	if err != nil {
+		return errors.Errorf("unable to get absolute path for %s: %s", dev, err)
+	}
+	if realPath, err = filepath.EvalSymlinks(realPath); err != nil {
+		return errors.Errorf("failed to canonicalise path for %s: %s", dev, err)
+	}
+	if err := checkDevAvailable(realPath); err != nil {
 		return err
 	}
-	if err := checkDevInVG(dev); err != nil {
+	if err := checkDevInVG(realPath); err != nil {
 		return err
 	}
 
@@ -130,7 +137,7 @@ func verifyBlockDevice(dev string, force bool) error {
 		return nil
 	}
 
-	if err := checkDevHasFS(dev); err != nil {
+	if err := checkDevHasFS(realPath); err != nil {
 		return err
 	}
 	return nil
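The fix canonicalizes the device path before running the availability/VG/filesystem checks, so that a symlinked path (device-mapper nodes are commonly symlinks) matches the canonical names the checks report. The same two-step canonicalization in isolation; "/dev/stdin" below is just a convenient symlinked demo path:

```go
package main

import (
	"fmt"
	"path/filepath"
)

// canonicalize mirrors the verifyBlockDevice change: make the path absolute,
// then resolve symlinks, so later checks see the canonical device name.
func canonicalize(dev string) (string, error) {
	realPath, err := filepath.Abs(dev)
	if err != nil {
		return "", fmt.Errorf("unable to get absolute path for %s: %s", dev, err)
	}
	realPath, err = filepath.EvalSymlinks(realPath)
	if err != nil {
		return "", fmt.Errorf("failed to canonicalise path for %s: %s", dev, err)
	}
	return realPath, nil
}

func main() {
	p, err := canonicalize("/dev/stdin")
	fmt.Println(p, err)
}
```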
12 vendor/github.com/containers/storage/drivers/overlay/overlay.go generated vendored
@@ -796,7 +796,17 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
 
 			mountProgram := exec.Command(d.options.mountProgram, "-o", label, target)
 			mountProgram.Dir = d.home
-			return mountProgram.Run()
+			var b bytes.Buffer
+			mountProgram.Stderr = &b
+			err := mountProgram.Run()
+			if err != nil {
+				output := b.String()
+				if output == "" {
+					output = "<stderr empty>"
+				}
+				return errors.Wrapf(err, "using mount program %s: %s", d.options.mountProgram, output)
+			}
+			return nil
 		}
 	} else if len(mountData) > pageSize {
 		//FIXME: We need to figure out to get this to work with additional stores
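The overlay change replaces a bare `Run()` with stderr capture so that mount-helper failures (e.g. fuse-overlayfs) surface their own diagnostics instead of an opaque exit status. The same pattern in isolation, using fmt error wrapping instead of github.com/pkg/errors for brevity:

```go
package main

import (
	"bytes"
	"fmt"
	"os/exec"
)

// runWithStderr runs cmd and, on failure, folds the captured stderr into the
// returned error, the way the overlay driver now reports mount_program failures.
func runWithStderr(cmd *exec.Cmd) error {
	var b bytes.Buffer
	cmd.Stderr = &b
	if err := cmd.Run(); err != nil {
		output := b.String()
		if output == "" {
			output = "<stderr empty>"
		}
		return fmt.Errorf("using program %s: %s: %w", cmd.Path, output, err)
	}
	return nil
}

func main() {
	err := runWithStderr(exec.Command("ls", "/nonexistent-path"))
	fmt.Println(err)
}
```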
31 vendor/github.com/containers/storage/images.go generated vendored
@@ -8,7 +8,6 @@ import (
 	"strings"
 	"time"
 
-	"github.com/containers/image/manifest"
 	"github.com/containers/storage/pkg/ioutils"
 	"github.com/containers/storage/pkg/stringid"
 	"github.com/containers/storage/pkg/truncindex"
@@ -117,7 +116,7 @@ type ImageStore interface {
 	ROImageStore
 	RWFileBasedStore
 	RWMetadataStore
-	RWBigDataStore
+	RWImageBigDataStore
 	FlaggableStore
 
 	// Create creates an image that has a specified ID (or a random one) and
@@ -595,15 +594,7 @@ func (r *imageStore) BigDataSize(id, key string) (int64, error) {
 		return size, nil
 	}
 	if data, err := r.BigData(id, key); err == nil && data != nil {
-		if r.SetBigData(id, key, data) == nil {
-			image, ok := r.lookup(id)
-			if !ok {
-				return -1, ErrImageUnknown
-			}
-			if size, ok := image.BigDataSizes[key]; ok {
-				return size, nil
-			}
-		}
+		return int64(len(data)), nil
 	}
 	return -1, ErrSizeUnknown
 }
@@ -622,17 +613,6 @@ func (r *imageStore) BigDataDigest(id, key string) (digest.Digest, error) {
 	if d, ok := image.BigDataDigests[key]; ok {
 		return d, nil
 	}
-	if data, err := r.BigData(id, key); err == nil && data != nil {
-		if r.SetBigData(id, key, data) == nil {
-			image, ok := r.lookup(id)
-			if !ok {
-				return "", ErrImageUnknown
-			}
-			if d, ok := image.BigDataDigests[key]; ok {
-				return d, nil
-			}
-		}
-	}
 	return "", ErrDigestUnknown
 }
 
@@ -655,7 +635,7 @@ func imageSliceWithoutValue(slice []*Image, value *Image) []*Image {
 	return modified
 }
 
-func (r *imageStore) SetBigData(id, key string, data []byte) error {
+func (r *imageStore) SetBigData(id, key string, data []byte, digestManifest func([]byte) (digest.Digest, error)) error {
 	if key == "" {
 		return errors.Wrapf(ErrInvalidBigDataName, "can't set empty name for image big data item")
 	}
@@ -672,7 +652,10 @@ func (r *imageStore) SetBigData(id, key string, data []byte) error {
 	}
 	var newDigest digest.Digest
 	if bigDataNameIsManifest(key) {
-		if newDigest, err = manifest.Digest(data); err != nil {
+		if digestManifest == nil {
+			return errors.Wrapf(ErrDigestUnknown, "error digesting manifest: no manifest digest callback provided")
+		}
+		if newDigest, err = digestManifest(data); err != nil {
 			return errors.Wrapf(err, "error digesting manifest")
 		}
 	} else {
15 vendor/github.com/containers/storage/lockfile.go generated vendored
@@ -58,8 +58,17 @@ func GetROLockfile(path string) (Locker, error) {
 	return getLockfile(path, true)
 }
 
-// getLockfile is a helper for GetLockfile and GetROLockfile and returns Locker
-// based on the path and read-only property.
+// getLockfile returns a Locker object, possibly (depending on the platform)
+// working inter-process, and associated with the specified path.
+//
+// If ro, the lock is a read-write lock and the returned Locker should correspond to the
+// “lock for reading” (shared) operation; otherwise, the lock is either an exclusive lock,
+// or a read-write lock and Locker should correspond to the “lock for writing” (exclusive) operation.
+//
+// WARNING:
+// - The lock may or MAY NOT be inter-process.
+// - There may or MAY NOT be an actual object on the filesystem created for the specified path.
+// - Even if ro, the lock MAY be exclusive.
 func getLockfile(path string, ro bool) (Locker, error) {
 	lockfilesLock.Lock()
 	defer lockfilesLock.Unlock()
@@ -79,7 +88,7 @@ func getLockfile(path string, ro bool) (Locker, error) {
 		}
 		return locker, nil
 	}
-	locker, err := getLockFile(path, ro) // platform dependent locker
+	locker, err := createLockerForPath(path, ro) // platform-dependent locker
 	if err != nil {
 		return nil, err
 	}
89 vendor/github.com/containers/storage/lockfile_unix.go generated vendored
@@ -13,34 +13,6 @@ import (
 	"golang.org/x/sys/unix"
 )
 
-func getLockFile(path string, ro bool) (Locker, error) {
-	var fd int
-	var err error
-	if ro {
-		fd, err = unix.Open(path, os.O_RDONLY, 0)
-	} else {
-		fd, err = unix.Open(path, os.O_RDWR|os.O_CREATE, unix.S_IRUSR|unix.S_IWUSR)
-	}
-	if err != nil {
-		return nil, errors.Wrapf(err, "error opening %q", path)
-	}
-	unix.CloseOnExec(fd)
-
-	locktype := unix.F_WRLCK
-	if ro {
-		locktype = unix.F_RDLCK
-	}
-	return &lockfile{
-		stateMutex: &sync.Mutex{},
-		rwMutex:    &sync.RWMutex{},
-		file:       path,
-		fd:         uintptr(fd),
-		lw:         stringid.GenerateRandomID(),
-		locktype:   int16(locktype),
-		locked:     false,
-		ro:         ro}, nil
-}
-
 type lockfile struct {
 	// rwMutex serializes concurrent reader-writer acquisitions in the same process space
 	rwMutex *sync.RWMutex
@@ -55,6 +27,52 @@ type lockfile struct {
 	ro bool
 }
 
+// openLock opens the file at path and returns the corresponding file
+// descriptor. Note that the path is opened read-only when ro is set. If ro
+// is unset, openLock will open the path read-write and create the file if
+// necessary.
+func openLock(path string, ro bool) (int, error) {
+	if ro {
+		return unix.Open(path, os.O_RDONLY, 0)
+	}
+	return unix.Open(path, os.O_RDWR|os.O_CREATE, unix.S_IRUSR|unix.S_IWUSR)
+}
+
+// createLockerForPath returns a Locker object, possibly (depending on the platform)
+// working inter-process and associated with the specified path.
+//
+// This function will be called at most once for each path value within a single process.
+//
+// If ro, the lock is a read-write lock and the returned Locker should correspond to the
+// “lock for reading” (shared) operation; otherwise, the lock is either an exclusive lock,
+// or a read-write lock and Locker should correspond to the “lock for writing” (exclusive) operation.
+//
+// WARNING:
+// - The lock may or MAY NOT be inter-process.
+// - There may or MAY NOT be an actual object on the filesystem created for the specified path.
+// - Even if ro, the lock MAY be exclusive.
+func createLockerForPath(path string, ro bool) (Locker, error) {
+	// Check if we can open the lock.
+	fd, err := openLock(path, ro)
+	if err != nil {
+		return nil, errors.Wrapf(err, "error opening %q", path)
+	}
+	unix.Close(fd)
+
+	locktype := unix.F_WRLCK
+	if ro {
+		locktype = unix.F_RDLCK
+	}
+	return &lockfile{
+		stateMutex: &sync.Mutex{},
+		rwMutex:    &sync.RWMutex{},
+		file:       path,
+		lw:         stringid.GenerateRandomID(),
+		locktype:   int16(locktype),
+		locked:     false,
+		ro:         ro}, nil
+}
+
 // lock locks the lockfile via FCTNL(2) based on the specified type and
 // command.
 func (l *lockfile) lock(l_type int16) {
@@ -63,7 +81,6 @@ func (l *lockfile) lock(l_type int16) {
 		Whence: int16(os.SEEK_SET),
 		Start:  0,
 		Len:    0,
-		Pid:    int32(os.Getpid()),
 	}
 	switch l_type {
 	case unix.F_RDLCK:
@@ -74,7 +91,16 @@ func (l *lockfile) lock(l_type int16) {
 		panic(fmt.Sprintf("attempted to acquire a file lock of unrecognized type %d", l_type))
 	}
 	l.stateMutex.Lock()
+	defer l.stateMutex.Unlock()
+	if l.counter == 0 {
+		// If we're the first reference on the lock, we need to open the file again.
+		fd, err := openLock(l.file, l.ro)
+		if err != nil {
+			panic(fmt.Sprintf("error opening %q", l.file))
+		}
+		unix.CloseOnExec(fd)
+		l.fd = uintptr(fd)
+	}
 	// Optimization: only use the (expensive) fcntl syscall when
 	// the counter is 0. In this case, we're either the first
 	// reader lock or a writer lock.
@@ -85,7 +111,6 @@ func (l *lockfile) lock(l_type int16) {
 	l.locktype = l_type
 	l.locked = true
 	l.counter++
-	l.stateMutex.Unlock()
 }
 
 // Lock locks the lockfile as a writer. Note that RLock() will be called if
@@ -133,6 +158,8 @@ func (l *lockfile) Unlock() {
 		for unix.FcntlFlock(l.fd, unix.F_SETLKW, &lk) != nil {
 			time.Sleep(10 * time.Millisecond)
 		}
+		// Close the file descriptor on the last unlock.
+		unix.Close(int(l.fd))
 	}
 	if l.locktype == unix.F_RDLCK {
 		l.rwMutex.RUnlock()
@@ -194,7 +221,7 @@ func (l *lockfile) Modified() (bool, error) {
 		return true, err
 	}
 	if n != len(id) {
-		return true, unix.ENOSPC
+		return true, nil
 	}
 	lw := l.lw
 	l.lw = string(id)
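The restructuring changes the lock file's descriptor lifecycle: instead of one fd held for the life of the process, the fd is opened when the first holder takes the lock (counter == 0) and closed on the last Unlock, so unused lock files no longer pin descriptors. A reduced, illustrative sketch of that reference-counted lifecycle, without the real fcntl(2) calls:

```go
package main

import (
	"fmt"
	"os"
	"sync"
)

// refLock sketches the new lockfile fd lifecycle: open on first acquire,
// close on last release, guarded by an in-process state mutex.
type refLock struct {
	mu      sync.Mutex
	path    string
	f       *os.File
	counter int
}

func (l *refLock) acquire() error {
	l.mu.Lock()
	defer l.mu.Unlock()
	if l.counter == 0 { // first reference: open the file again
		f, err := os.OpenFile(l.path, os.O_RDWR|os.O_CREATE, 0600)
		if err != nil {
			return err
		}
		l.f = f
		// the real code takes the fcntl(2) record lock here
	}
	l.counter++
	return nil
}

func (l *refLock) release() {
	l.mu.Lock()
	defer l.mu.Unlock()
	l.counter--
	if l.counter == 0 { // last reference: drop the lock and close the fd
		l.f.Close()
		l.f = nil
	}
}

func main() {
	l := &refLock{path: "/tmp/demo.lock"}
	if err := l.acquire(); err != nil {
		panic(err)
	}
	fmt.Println("holders:", l.counter)
	l.release()
	fmt.Println("holders:", l.counter)
}
```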
15 vendor/github.com/containers/storage/lockfile_windows.go generated vendored
@@ -8,7 +8,20 @@ import (
 	"time"
 )
 
-func getLockFile(path string, ro bool) (Locker, error) {
+// createLockerForPath returns a Locker object, possibly (depending on the platform)
+// working inter-process and associated with the specified path.
+//
+// This function will be called at most once for each path value within a single process.
+//
+// If ro, the lock is a read-write lock and the returned Locker should correspond to the
+// “lock for reading” (shared) operation; otherwise, the lock is either an exclusive lock,
+// or a read-write lock and Locker should correspond to the “lock for writing” (exclusive) operation.
+//
+// WARNING:
+// - The lock may or MAY NOT be inter-process.
+// - There may or MAY NOT be an actual object on the filesystem created for the specified path.
+// - Even if ro, the lock MAY be exclusive.
+func createLockerForPath(path string, ro bool) (Locker, error) {
 	return &lockfile{locked: false}, nil
 }
 
2 vendor/github.com/containers/storage/pkg/archive/archive.go generated vendored
@@ -636,7 +636,7 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
 		if chownOpts == nil {
 			chownOpts = &idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid}
 		}
-		if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil {
+		if err := idtools.SafeLchown(path, chownOpts.UID, chownOpts.GID); err != nil {
 			return err
 		}
 	}
3 vendor/github.com/containers/storage/pkg/archive/archive_linux.go generated vendored
@@ -7,6 +7,7 @@ import (
 	"strings"
 	"syscall"
 
+	"github.com/containers/storage/pkg/idtools"
 	"github.com/containers/storage/pkg/system"
 	"golang.org/x/sys/unix"
 )
@@ -130,7 +131,7 @@ func (overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool,
 	if err := unix.Mknod(originalPath, unix.S_IFCHR, 0); err != nil {
 		return false, err
 	}
-	if err := os.Chown(originalPath, hdr.Uid, hdr.Gid); err != nil {
+	if err := idtools.SafeChown(originalPath, hdr.Uid, hdr.Gid); err != nil {
 		return false, err
 	}
18 vendor/github.com/containers/storage/pkg/idtools/idtools.go generated vendored
@@ -7,6 +7,9 @@ import (
 	"sort"
 	"strconv"
 	"strings"
+	"syscall"
+
+	"github.com/pkg/errors"
 )
 
 // IDMap contains a single entry for user namespace range remapping. An array
@@ -277,3 +280,18 @@ func parseSubidFile(path, username string) (ranges, error) {
 	}
 	return rangeList, nil
 }
+
+func checkChownErr(err error, name string, uid, gid int) error {
+	if e, ok := err.(*os.PathError); ok && e.Err == syscall.EINVAL {
+		return errors.Wrapf(err, "there might not be enough IDs available in the namespace (requested %d:%d for %s)", uid, gid, name)
+	}
+	return err
+}
+
+func SafeChown(name string, uid, gid int) error {
+	return checkChownErr(os.Chown(name, uid, gid), name, uid, gid)
+}
+
+func SafeLchown(name string, uid, gid int) error {
+	return checkChownErr(os.Lchown(name, uid, gid), name, uid, gid)
+}
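SafeChown/SafeLchown wrap the chown calls to translate EINVAL, which inside a rootless user namespace usually means the requested UID/GID is outside the mapped range, into an actionable message. An equivalent standalone check using only the standard library:

```go
package main

import (
	"fmt"
	"os"
	"syscall"
)

// chownFriendly mirrors checkChownErr/SafeChown: EINVAL from chown in a user
// namespace usually means the uid/gid is not mapped, so say so explicitly.
func chownFriendly(name string, uid, gid int) error {
	err := os.Chown(name, uid, gid)
	if e, ok := err.(*os.PathError); ok && e.Err == syscall.EINVAL {
		return fmt.Errorf("there might not be enough IDs available in the namespace (requested %d:%d for %s): %w", uid, gid, name, err)
	}
	return err
}

func main() {
	f, err := os.CreateTemp("", "chown-demo")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	fmt.Println(chownFriendly(f.Name(), os.Getuid(), os.Getgid()))
}
```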
4 vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go generated vendored
@@ -30,7 +30,7 @@ func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chown
 		paths = []string{path}
 	} else if err == nil && chownExisting {
 		// short-circuit--we were called with an existing directory and chown was requested
-		return os.Chown(path, ownerUID, ownerGID)
+		return SafeChown(path, ownerUID, ownerGID)
 	} else if err == nil {
 		// nothing to do; directory path fully exists already and chown was NOT requested
 		return nil
@@ -60,7 +60,7 @@ func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chown
 	// even if it existed, we will chown the requested path + any subpaths that
 	// didn't exist when we called MkdirAll
 	for _, pathComponent := range paths {
-		if err := os.Chown(pathComponent, ownerUID, ownerGID); err != nil {
+		if err := SafeChown(pathComponent, ownerUID, ownerGID); err != nil {
 			return err
 		}
 	}
11 vendor/github.com/containers/storage/pkg/idtools/parser.go generated vendored
@@ -2,6 +2,8 @@ package idtools
 
 import (
 	"fmt"
+	"math"
+	"math/bits"
 	"strconv"
 	"strings"
)
@@ -31,10 +33,11 @@ func parseTriple(spec []string) (container, host, size uint32, err error) {
 
 // ParseIDMap parses idmap triples from string.
 func ParseIDMap(mapSpec []string, mapSetting string) (idmap []IDMap, err error) {
+	stdErr := fmt.Errorf("error initializing ID mappings: %s setting is malformed", mapSetting)
 	for _, idMapSpec := range mapSpec {
 		idSpec := strings.Fields(strings.Map(nonDigitsToWhitespace, idMapSpec))
 		if len(idSpec)%3 != 0 {
-			return nil, fmt.Errorf("error initializing ID mappings: %s setting is malformed", mapSetting)
+			return nil, stdErr
 		}
 		for i := range idSpec {
 			if i%3 != 0 {
@@ -42,7 +45,11 @@ func ParseIDMap(mapSpec []string, mapSetting string) (idmap []IDMap, err error)
 			}
 			cid, hid, size, err := parseTriple(idSpec[i : i+3])
 			if err != nil {
-				return nil, fmt.Errorf("error initializing ID mappings: %s setting is malformed", mapSetting)
+				return nil, stdErr
 			}
+			// Avoid possible integer overflow on 32bit builds
+			if bits.UintSize == 32 && (cid > math.MaxInt32 || hid > math.MaxInt32 || size > math.MaxInt32) {
+				return nil, stdErr
+			}
 			mapping := IDMap{
 				ContainerID: int(cid),
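ParseIDMap consumes "containerID:hostID:size" triples; the new guard rejects values that would overflow int on 32-bit builds before they are narrowed from uint32. A usage sketch with an example mapping string:

```go
package main

import (
	"fmt"

	"github.com/containers/storage/pkg/idtools"
)

func main() {
	// "0:100000:65536": map container IDs 0-65535 onto host IDs 100000-165535.
	idmap, err := idtools.ParseIDMap([]string{"0:100000:65536"}, "UID")
	if err != nil {
		panic(err)
	}
	for _, m := range idmap {
		fmt.Printf("container %d -> host %d (size %d)\n", m.ContainerID, m.HostID, m.Size)
	}
}
```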
25 vendor/github.com/containers/storage/pkg/reexec/command_linux.go generated vendored
@@ -3,6 +3,7 @@
 package reexec
 
 import (
+	"context"
 	"os/exec"
 	"syscall"
 
@@ -20,11 +21,23 @@ func Self() string {
 // This will use the in-memory version (/proc/self/exe) of the current binary,
 // it is thus safe to delete or replace the on-disk binary (os.Args[0]).
 func Command(args ...string) *exec.Cmd {
-	return &exec.Cmd{
-		Path: Self(),
-		Args: args,
-		SysProcAttr: &syscall.SysProcAttr{
-			Pdeathsig: unix.SIGTERM,
-		},
+	cmd := exec.Command(Self())
+	cmd.Args = args
+	cmd.SysProcAttr = &syscall.SysProcAttr{
+		Pdeathsig: unix.SIGTERM,
 	}
+	return cmd
 }
+
+// CommandContext returns *exec.Cmd which has Path as current binary, and also
+// sets SysProcAttr.Pdeathsig to SIGTERM.
+// This will use the in-memory version (/proc/self/exe) of the current binary,
+// it is thus safe to delete or replace the on-disk binary (os.Args[0]).
+func CommandContext(ctx context.Context, args ...string) *exec.Cmd {
+	cmd := exec.CommandContext(ctx, Self())
+	cmd.Args = args
+	cmd.SysProcAttr = &syscall.SysProcAttr{
+		Pdeathsig: unix.SIGTERM,
+	}
+	return cmd
+}
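Building the Cmd through exec.Command instead of a bare struct literal keeps the setup that exec.Command performs (error bookkeeping, path handling) while still overriding Args to control argv[0]; on Linux, Pdeathsig additionally makes the kernel SIGTERM the re-executed child if the parent dies. A Linux-only usage sketch of the same pattern:

```go
package main

import (
	"fmt"
	"os"
	"os/exec"
	"syscall"

	"golang.org/x/sys/unix"
)

// selfCommand re-runs the current binary (via /proc/self/exe, as the reexec
// package does) with a chosen argv[0], and asks the kernel to SIGTERM the
// child if this parent process dies.
func selfCommand(args ...string) *exec.Cmd {
	cmd := exec.Command("/proc/self/exe")
	cmd.Args = args
	cmd.SysProcAttr = &syscall.SysProcAttr{
		Pdeathsig: unix.SIGTERM,
	}
	return cmd
}

func main() {
	if os.Args[0] == "child" {
		fmt.Println("running as re-executed child")
		return
	}
	cmd := selfCommand("child")
	cmd.Stdout = os.Stdout
	if err := cmd.Run(); err != nil {
		panic(err)
	}
}
```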
15 vendor/github.com/containers/storage/pkg/reexec/command_unix.go generated vendored
@@ -3,6 +3,7 @@
 package reexec
 
 import (
+	"context"
 	"os/exec"
 )
 
@@ -16,8 +17,14 @@ func Self() string {
 // For example if current binary is "docker" at "/usr/bin/", then cmd.Path will
 // be set to "/usr/bin/docker".
 func Command(args ...string) *exec.Cmd {
-	return &exec.Cmd{
-		Path: Self(),
-		Args: args,
-	}
+	cmd := exec.Command(Self())
+	cmd.Args = args
+	return cmd
 }
+
+// CommandContext returns *exec.Cmd which has Path as current binary.
+func CommandContext(ctx context.Context, args ...string) *exec.Cmd {
+	cmd := exec.CommandContext(ctx, Self())
+	cmd.Args = args
+	return cmd
+}
6 vendor/github.com/containers/storage/pkg/reexec/command_unsupported.go generated vendored
@@ -3,6 +3,7 @@
 package reexec
 
 import (
+	"context"
 	"os/exec"
 )
 
@@ -10,3 +11,8 @@ import (
 func Command(args ...string) *exec.Cmd {
 	return nil
 }
+
+// CommandContext is unsupported on operating systems apart from Linux, Windows, Solaris and Darwin.
+func CommandContext(ctx context.Context, args ...string) *exec.Cmd {
+	return nil
+}
17 vendor/github.com/containers/storage/pkg/reexec/command_windows.go generated vendored
@@ -3,6 +3,7 @@
 package reexec
 
 import (
+	"context"
 	"os/exec"
 )
 
@@ -16,8 +17,16 @@ func Self() string {
 // For example if current binary is "docker.exe" at "C:\", then cmd.Path will
 // be set to "C:\docker.exe".
 func Command(args ...string) *exec.Cmd {
-	return &exec.Cmd{
-		Path: Self(),
-		Args: args,
-	}
+	cmd := exec.Command(Self())
+	cmd.Args = args
+	return cmd
 }
+
+// Command returns *exec.Cmd which has Path as current binary.
+// For example if current binary is "docker.exe" at "C:\", then cmd.Path will
+// be set to "C:\docker.exe".
+func CommandContext(ctx context.Context, args ...string) *exec.Cmd {
+	cmd := exec.CommandContext(ctx, Self())
+	cmd.Args = args
+	return cmd
+}
95 vendor/github.com/containers/storage/store.go generated vendored
@@ -32,7 +32,7 @@ import (
 
 var (
 	// DefaultStoreOptions is a reasonable default set of options.
-	DefaultStoreOptions StoreOptions
+	defaultStoreOptions StoreOptions
 	stores              []*store
 	storesLock          sync.Mutex
 )
@@ -102,19 +102,21 @@ type ROBigDataStore interface {
 	BigDataNames(id string) ([]string, error)
 }
 
-// A RWBigDataStore wraps up the read-write big-data related methods of the
-// various types of file-based lookaside stores that we implement.
-type RWBigDataStore interface {
-	// SetBigData stores a (potentially large) piece of data associated with this
-	// ID.
-	SetBigData(id, key string, data []byte) error
+// A RWImageBigDataStore wraps up how we store big-data associated with images.
+type RWImageBigDataStore interface {
+	// SetBigData stores a (potentially large) piece of data associated
+	// with this ID.
+	// Pass github.com/containers/image/manifest.Digest as digestManifest
+	// to allow ByDigest to find images by their correct digests.
+	SetBigData(id, key string, data []byte, digestManifest func([]byte) (digest.Digest, error)) error
 }
 
-// A BigDataStore wraps up the most common big-data related methods of the
-// various types of file-based lookaside stores that we implement.
-type BigDataStore interface {
+// A ContainerBigDataStore wraps up how we store big-data associated with containers.
+type ContainerBigDataStore interface {
 	ROBigDataStore
-	RWBigDataStore
+	// SetBigData stores a (potentially large) piece of data associated
+	// with this ID.
+	SetBigData(id, key string, data []byte) error
 }
 
 // A FlaggableStore can have flags set and cleared on items which it manages.
@@ -352,9 +354,11 @@ type Store interface {
 	// of named data associated with an image.
 	ImageBigDataDigest(id, key string) (digest.Digest, error)
 
-	// SetImageBigData stores a (possibly large) chunk of named data associated
-	// with an image.
-	SetImageBigData(id, key string, data []byte) error
+	// SetImageBigData stores a (possibly large) chunk of named data
+	// associated with an image. Pass
+	// github.com/containers/image/manifest.Digest as digestManifest to
+	// allow ImagesByDigest to find images by their correct digests.
+	SetImageBigData(id, key string, data []byte, digestManifest func([]byte) (digest.Digest, error)) error
 
 	// ImageSize computes the size of the image's layers and ancillary data.
 	ImageSize(id string) (int64, error)
@@ -456,6 +460,9 @@ type Store interface {
 	// Version returns version information, in the form of key-value pairs, from
 	// the storage package.
 	Version() ([][2]string, error)
+
+	// GetDigestLock returns digest-specific Locker.
+	GetDigestLock(digest.Digest) (Locker, error)
 }
 
 // IDMappingOptions are used for specifying how ID mapping should be set up for
@@ -525,6 +532,7 @@ type store struct {
 	imageStore     ImageStore
 	roImageStores  []ROImageStore
 	containerStore ContainerStore
+	digestLockRoot string
 }
 
 // GetStore attempts to find an already-created Store object matching the
@@ -546,7 +554,7 @@ type store struct {
 // }
 func GetStore(options StoreOptions) (Store, error) {
 	if options.RunRoot == "" && options.GraphRoot == "" && options.GraphDriverName == "" && len(options.GraphDriverOptions) == 0 {
-		options = DefaultStoreOptions
+		options = defaultStoreOptions
 	}
 
 	if options.GraphRoot != "" {
@@ -694,9 +702,20 @@ func (s *store) load() error {
 		return err
 	}
 	s.containerStore = rcs
+
+	s.digestLockRoot = filepath.Join(s.runRoot, driverPrefix+"locks")
+	if err := os.MkdirAll(s.digestLockRoot, 0700); err != nil {
+		return err
+	}
+
 	return nil
 }
 
+// GetDigestLock returns a digest-specific Locker.
+func (s *store) GetDigestLock(d digest.Digest) (Locker, error) {
+	return GetLockfile(filepath.Join(s.digestLockRoot, d.String()))
+}
+
 func (s *store) getGraphDriver() (drivers.Driver, error) {
 	if s.graphDriver != nil {
 		return s.graphDriver, nil
@@ -1019,8 +1038,9 @@ func (s *store) imageTopLayerForMapping(image *Image, ristore ROImageStore, crea
 		return reflect.DeepEqual(layer.UIDMap, options.UIDMap) && reflect.DeepEqual(layer.GIDMap, options.GIDMap)
 	}
 	var layer, parentLayer *Layer
+	allStores := append([]ROLayerStore{rlstore}, lstores...)
 	// Locate the image's top layer and its parent, if it has one.
-	for _, s := range append([]ROLayerStore{rlstore}, lstores...) {
+	for _, s := range allStores {
 		store := s
 		if store != rlstore {
 			store.Lock()
@@ -1037,10 +1057,13 @@ func (s *store) imageTopLayerForMapping(image *Image, ristore ROImageStore, crea
 			// We want the layer's parent, too, if it has one.
 			var cParentLayer *Layer
 			if cLayer.Parent != "" {
-				// Its parent should be around here, somewhere.
-				if cParentLayer, err = store.Get(cLayer.Parent); err != nil {
-					// Nope, couldn't find it. We're not going to be able
-					// to diff this one properly.
+				// Its parent should be in one of the stores, somewhere.
+				for _, ps := range allStores {
+					if cParentLayer, err = ps.Get(cLayer.Parent); err == nil {
+						break
+					}
+				}
+				if cParentLayer == nil {
 					continue
 				}
 			}
@@ -1485,7 +1508,7 @@ func (s *store) ImageBigData(id, key string) ([]byte, error) {
 	return nil, ErrImageUnknown
 }
 
-func (s *store) SetImageBigData(id, key string, data []byte) error {
+func (s *store) SetImageBigData(id, key string, data []byte, digestManifest func([]byte) (digest.Digest, error)) error {
 	ristore, err := s.ImageStore()
 	if err != nil {
 		return err
@@ -1499,7 +1522,7 @@ func (s *store) SetImageBigData(id, key string, data []byte) error {
 		}
 	}
 
-	return ristore.SetBigData(id, key, data)
+	return ristore.SetBigData(id, key, data, digestManifest)
 }
 
 func (s *store) ImageSize(id string) (int64, error) {
@@ -3213,8 +3236,20 @@ func copyStringInterfaceMap(m map[string]interface{}) map[string]interface{} {
 	return ret
 }
 
-// DefaultConfigFile path to the system wide storage.conf file
-const DefaultConfigFile = "/etc/containers/storage.conf"
+// defaultConfigFile path to the system wide storage.conf file
+const defaultConfigFile = "/etc/containers/storage.conf"
+
+// DefaultConfigFile returns the path to the storage config file used
+func DefaultConfigFile(rootless bool) (string, error) {
+	if rootless {
+		home, err := homeDir()
+		if err != nil {
+			return "", errors.Wrapf(err, "cannot determine users homedir")
+		}
+		return filepath.Join(home, ".config/containers/storage.conf"), nil
+	}
+	return defaultConfigFile, nil
+}
 
 // TOML-friendly explicit tables used for conversions.
 type tomlConfig struct {
@@ -3354,19 +3389,19 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) {
 }
 
 func init() {
-	DefaultStoreOptions.RunRoot = "/var/run/containers/storage"
-	DefaultStoreOptions.GraphRoot = "/var/lib/containers/storage"
-	DefaultStoreOptions.GraphDriverName = ""
+	defaultStoreOptions.RunRoot = "/var/run/containers/storage"
+	defaultStoreOptions.GraphRoot = "/var/lib/containers/storage"
+	defaultStoreOptions.GraphDriverName = ""
 
-	ReloadConfigurationFile(DefaultConfigFile, &DefaultStoreOptions)
+	ReloadConfigurationFile(defaultConfigFile, &defaultStoreOptions)
 }
 
 func GetDefaultMountOptions() ([]string, error) {
 	mountOpts := []string{
 		".mountopt",
-		fmt.Sprintf("%s.mountopt", DefaultStoreOptions.GraphDriverName),
+		fmt.Sprintf("%s.mountopt", defaultStoreOptions.GraphDriverName),
 	}
-	for _, option := range DefaultStoreOptions.GraphDriverOptions {
+	for _, option := range defaultStoreOptions.GraphDriverOptions {
 		key, val, err := parsers.ParseKeyValueOpt(option)
 		if err != nil {
 			return nil, err
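GetDigestLock gives callers a per-blob-digest Locker backed by a lock file under the store's run root, useful for serializing concurrent work (e.g. pulls) on the same layer across processes. A usage sketch; running it for real requires access to a storage root:

```go
package main

import (
	"fmt"

	"github.com/containers/storage"
	digest "github.com/opencontainers/go-digest"
)

func main() {
	store, err := storage.GetStore(storage.StoreOptions{}) // empty options -> defaults
	if err != nil {
		panic(err)
	}
	d := digest.FromString("example layer contents")

	// Serialize work on this digest across goroutines and processes
	// sharing the same run root.
	lock, err := store.GetDigestLock(d)
	if err != nil {
		panic(err)
	}
	lock.Lock()
	defer lock.Unlock()
	fmt.Println("holding lock for", d)
}
```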
250 vendor/github.com/containers/storage/utils.go generated vendored Normal file
@@ -0,0 +1,250 @@
|
||||
package storage
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"os/user"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/BurntSushi/toml"
|
||||
"github.com/containers/storage/pkg/idtools"
|
||||
"github.com/containers/storage/pkg/system"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// ParseIDMapping takes idmappings and subuid and subgid maps and returns a storage mapping
|
||||
func ParseIDMapping(UIDMapSlice, GIDMapSlice []string, subUIDMap, subGIDMap string) (*IDMappingOptions, error) {
|
||||
options := IDMappingOptions{
|
||||
HostUIDMapping: true,
|
||||
HostGIDMapping: true,
|
||||
}
|
||||
if subGIDMap == "" && subUIDMap != "" {
|
||||
subGIDMap = subUIDMap
|
||||
}
|
||||
if subUIDMap == "" && subGIDMap != "" {
|
||||
subUIDMap = subGIDMap
|
||||
}
|
||||
if len(GIDMapSlice) == 0 && len(UIDMapSlice) != 0 {
|
||||
GIDMapSlice = UIDMapSlice
|
||||
}
|
||||
if len(UIDMapSlice) == 0 && len(GIDMapSlice) != 0 {
|
||||
UIDMapSlice = GIDMapSlice
|
||||
}
|
||||
if len(UIDMapSlice) == 0 && subUIDMap == "" && os.Getuid() != 0 {
|
||||
UIDMapSlice = []string{fmt.Sprintf("0:%d:1", os.Getuid())}
|
||||
}
|
||||
if len(GIDMapSlice) == 0 && subGIDMap == "" && os.Getuid() != 0 {
|
||||
GIDMapSlice = []string{fmt.Sprintf("0:%d:1", os.Getgid())}
|
||||
}
|
||||
|
||||
if subUIDMap != "" && subGIDMap != "" {
|
||||
mappings, err := idtools.NewIDMappings(subUIDMap, subGIDMap)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to create NewIDMappings for uidmap=%s gidmap=%s", subUIDMap, subGIDMap)
|
||||
}
|
||||
options.UIDMap = mappings.UIDs()
|
||||
options.GIDMap = mappings.GIDs()
|
||||
}
|
||||
parsedUIDMap, err := idtools.ParseIDMap(UIDMapSlice, "UID")
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to create ParseUIDMap UID=%s", UIDMapSlice)
|
||||
}
|
||||
parsedGIDMap, err := idtools.ParseIDMap(GIDMapSlice, "GID")
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to create ParseGIDMap GID=%s", UIDMapSlice)
|
||||
}
|
||||
options.UIDMap = append(options.UIDMap, parsedUIDMap...)
|
||||
options.GIDMap = append(options.GIDMap, parsedGIDMap...)
|
||||
if len(options.UIDMap) > 0 {
|
||||
options.HostUIDMapping = false
|
||||
}
|
||||
if len(options.GIDMap) > 0 {
|
||||
options.HostGIDMapping = false
|
||||
}
|
||||
return &options, nil
|
||||
}
|
||||
|
||||
// GetRootlessRuntimeDir returns the runtime directory when running as non root
|
||||
func GetRootlessRuntimeDir(rootlessUid int) (string, error) {
|
||||
runtimeDir := os.Getenv("XDG_RUNTIME_DIR")
|
||||
if runtimeDir == "" {
|
||||
tmpDir := fmt.Sprintf("/run/user/%d", rootlessUid)
|
||||
st, err := system.Stat(tmpDir)
|
||||
if err == nil && int(st.UID()) == os.Getuid() && st.Mode()&0700 == 0700 && st.Mode()&0066 == 0000 {
|
||||
return tmpDir, nil
|
||||
}
|
||||
}
|
||||
tmpDir := fmt.Sprintf("%s/%d", os.TempDir(), rootlessUid)
|
||||
if err := os.MkdirAll(tmpDir, 0700); err != nil {
|
||||
logrus.Errorf("failed to create %s: %v", tmpDir, err)
|
||||
} else {
|
||||
return tmpDir, nil
|
||||
}
|
||||
home, err := homeDir()
|
||||
if err != nil {
|
||||
return "", errors.Wrapf(err, "neither XDG_RUNTIME_DIR nor HOME was set non-empty")
|
||||
}
|
||||
resolvedHome, err := filepath.EvalSymlinks(home)
|
||||
if err != nil {
|
||||
return "", errors.Wrapf(err, "cannot resolve %s", home)
|
||||
}
|
||||
return filepath.Join(resolvedHome, "rundir"), nil
|
||||
}

// getRootlessDirInfo returns the parent path of where the storage for containers and
// volumes will be in rootless mode
func getRootlessDirInfo(rootlessUid int) (string, string, error) {
	rootlessRuntime, err := GetRootlessRuntimeDir(rootlessUid)
	if err != nil {
		return "", "", err
	}

	dataDir := os.Getenv("XDG_DATA_HOME")
	if dataDir == "" {
		home, err := homeDir()
		if err != nil {
			return "", "", errors.Wrapf(err, "neither XDG_DATA_HOME nor HOME was set non-empty")
		}
		// runc doesn't like symlinks in the rootfs path, and at least
		// on CoreOS /home is a symlink to /var/home, so resolve any symlink.
		resolvedHome, err := filepath.EvalSymlinks(home)
		if err != nil {
			return "", "", errors.Wrapf(err, "cannot resolve %s", home)
		}
		dataDir = filepath.Join(resolvedHome, ".local", "share")
	}
	return dataDir, rootlessRuntime, nil
}

// getRootlessStorageOpts returns the storage opts for containers running as non root
func getRootlessStorageOpts(rootlessUid int) (StoreOptions, error) {
	var opts StoreOptions

	dataDir, rootlessRuntime, err := getRootlessDirInfo(rootlessUid)
	if err != nil {
		return opts, err
	}
	opts.RunRoot = rootlessRuntime
	opts.GraphRoot = filepath.Join(dataDir, "containers", "storage")
	if path, err := exec.LookPath("fuse-overlayfs"); err == nil {
		opts.GraphDriverName = "overlay"
		opts.GraphDriverOptions = []string{fmt.Sprintf("overlay.mount_program=%s", path)}
	} else {
		opts.GraphDriverName = "vfs"
	}
	return opts, nil
}

type tomlOptionsConfig struct {
	MountProgram string `toml:"mount_program"`
}

func getTomlStorage(storeOptions *StoreOptions) *tomlConfig {
	config := new(tomlConfig)

	config.Storage.Driver = storeOptions.GraphDriverName
	config.Storage.RunRoot = storeOptions.RunRoot
	config.Storage.GraphRoot = storeOptions.GraphRoot
	for _, i := range storeOptions.GraphDriverOptions {
		s := strings.Split(i, "=")
		if s[0] == "overlay.mount_program" {
			config.Storage.Options.MountProgram = s[1]
		}
	}

	return config
}
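For reference, the storage.conf that getTomlStorage feeds to the TOML encoder has this shape; the sketch below reproduces it with local stand-in types (the real tomlConfig type is unexported) and purely illustrative paths, using the BurntSushi/toml encoder already listed in vendor.conf:

```go
package main

import (
	"os"

	"github.com/BurntSushi/toml"
)

// Local stand-ins for the unexported tomlConfig/tomlOptionsConfig types above.
type options struct {
	MountProgram string `toml:"mount_program"`
}

type config struct {
	Storage struct {
		Driver    string  `toml:"driver"`
		RunRoot   string  `toml:"runroot"`
		GraphRoot string  `toml:"graphroot"`
		Options   options `toml:"options"`
	} `toml:"storage"`
}

func main() {
	var c config
	c.Storage.Driver = "overlay"
	c.Storage.RunRoot = "/run/user/1000"
	c.Storage.GraphRoot = "/home/user/.local/share/containers/storage"
	c.Storage.Options.MountProgram = "/usr/bin/fuse-overlayfs"

	// Prints the same shape of TOML that DefaultStoreOptions writes
	// to storage.conf for a first-time rootless user.
	if err := toml.NewEncoder(os.Stdout).Encode(c); err != nil {
		os.Exit(1)
	}
}
```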

func getRootlessUID() int {
	// _CONTAINERS_ROOTLESS_UID lets a parent process tell us which UID
	// owns the rootless environment; otherwise fall back to the euid.
	uidEnv := os.Getenv("_CONTAINERS_ROOTLESS_UID")
	if uidEnv != "" {
		u, _ := strconv.Atoi(uidEnv)
		return u
	}
	return os.Geteuid()
}

// DefaultStoreOptionsAutoDetectUID returns the default storage options for containers
func DefaultStoreOptionsAutoDetectUID() (StoreOptions, error) {
	uid := getRootlessUID()
	return DefaultStoreOptions(uid != 0, uid)
}

// DefaultStoreOptions returns the default storage options for containers
func DefaultStoreOptions(rootless bool, rootlessUid int) (StoreOptions, error) {
	var (
		defaultRootlessRunRoot   string
		defaultRootlessGraphRoot string
		err                      error
	)
	storageOpts := defaultStoreOptions
	if rootless && rootlessUid != 0 {
		storageOpts, err = getRootlessStorageOpts(rootlessUid)
		if err != nil {
			return storageOpts, err
		}
	}

	storageConf, err := DefaultConfigFile(rootless && rootlessUid != 0)
	if err != nil {
		return storageOpts, err
	}
	if _, err = os.Stat(storageConf); err == nil {
		defaultRootlessRunRoot = storageOpts.RunRoot
		defaultRootlessGraphRoot = storageOpts.GraphRoot
		storageOpts = StoreOptions{}
		ReloadConfigurationFile(storageConf, &storageOpts)
	} else if !os.IsNotExist(err) {
		// Only a stat failure other than "file does not exist" is fatal.
		// A bare `if !os.IsNotExist(err)` here would also trigger on
		// success (errors.Wrapf(nil, ...) returns nil) and skip the
		// rootless defaulting below.
		return storageOpts, errors.Wrapf(err, "cannot stat %s", storageConf)
	}

	if rootless && rootlessUid != 0 {
		if err == nil {
			// If the file did not specify a graphroot or runroot,
			// set sane defaults so we don't try and use root-owned
			// directories
			if storageOpts.RunRoot == "" {
				storageOpts.RunRoot = defaultRootlessRunRoot
			}
			if storageOpts.GraphRoot == "" {
				storageOpts.GraphRoot = defaultRootlessGraphRoot
			}
		} else {
			if err := os.MkdirAll(filepath.Dir(storageConf), 0755); err != nil {
				return storageOpts, errors.Wrapf(err, "cannot make directory %s", filepath.Dir(storageConf))
			}
			file, err := os.OpenFile(storageConf, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666)
			if err != nil {
				return storageOpts, errors.Wrapf(err, "cannot open %s", storageConf)
			}

			tomlConfiguration := getTomlStorage(&storageOpts)
			defer file.Close()
			enc := toml.NewEncoder(file)
			if err := enc.Encode(tomlConfiguration); err != nil {
				os.Remove(storageConf)

				return storageOpts, errors.Wrapf(err, "failed to encode %s", storageConf)
			}
		}
	}
	return storageOpts, nil
}

func homeDir() (string, error) {
	home := os.Getenv("HOME")
	if home == "" {
		usr, err := user.Current()
		if err != nil {
			return "", errors.Wrapf(err, "neither XDG_RUNTIME_DIR nor HOME was set non-empty")
		}
		home = usr.HomeDir
	}
	return home, nil
}
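Tying the helpers together, a hedged end-to-end sketch of how a consumer such as skopeo might obtain a store with these defaults, assuming this vendored copy matches upstream's exported API (storage.GetStore and the Store accessors are the package's entry points; the printing is illustrative):

```go
package main

import (
	"fmt"
	"os"

	"github.com/containers/storage"
)

func main() {
	// Pick rootless or root defaults based on the effective UID
	// (honoring _CONTAINERS_ROOTLESS_UID, as getRootlessUID does).
	options, err := storage.DefaultStoreOptionsAutoDetectUID()
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	store, err := storage.GetStore(options)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer store.Shutdown(false)

	fmt.Println("graph root:", store.GraphRoot())
	fmt.Println("run root:  ", store.RunRoot())
}
```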

3 vendor/github.com/containers/storage/vendor.conf generated vendored
@@ -1,18 +1,15 @@
github.com/BurntSushi/toml master
github.com/Microsoft/go-winio 307e919c663683a9000576fdc855acaf9534c165
github.com/Microsoft/hcsshim a8d9cc56cbce765a7eebdf4792e6ceceeff3edb8
github.com/containers/image master
github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76
github.com/docker/docker 86f080cff0914e9694068ed78d503701667c4c00
github.com/docker/go-units 0dadbb0345b35ec7ef35e228dabb8de89a65bf52
github.com/docker/libtrust master
github.com/klauspost/compress v1.4.1
github.com/klauspost/cpuid v1.2.0
github.com/klauspost/pgzip v1.2.1
github.com/mattn/go-shellwords 753a2322a99f87c0eff284980e77f53041555bc6
github.com/mistifyio/go-zfs c0224de804d438efd11ea6e52ada8014537d6062
github.com/opencontainers/go-digest master
github.com/opencontainers/image-spec master
github.com/opencontainers/runc 6c22e77604689db8725fa866f0f2ec0b3e8c3a07
github.com/opencontainers/selinux v1.1
github.com/ostreedev/ostree-go master

0 vendor/github.com/boltdb/bolt/LICENSE → vendor/github.com/etcd-io/bbolt/LICENSE generated vendored

190 vendor/github.com/boltdb/bolt/README.md → vendor/github.com/etcd-io/bbolt/README.md generated vendored
@@ -1,5 +1,18 @@
Bolt [![Coverage Status](https://coveralls.io/repos/boltdb/bolt/badge.svg?branch=master)](https://coveralls.io/r/boltdb/bolt?branch=master) [![GoDoc](https://godoc.org/github.com/boltdb/bolt?status.svg)](https://godoc.org/github.com/boltdb/bolt) ![Version](https://img.shields.io/badge/version-1.2.1-green.svg)
====
bbolt
=====

[![Go Report Card](https://goreportcard.com/badge/github.com/etcd-io/bbolt?style=flat-square)](https://goreportcard.com/report/github.com/etcd-io/bbolt)
[![Coverage](https://codecov.io/gh/etcd-io/bbolt/branch/master/graph/badge.svg)](https://codecov.io/gh/etcd-io/bbolt)
[![Build Status Travis](https://img.shields.io/travis/etcd-io/bboltx.svg?style=flat-square&&branch=master)](https://travis-ci.com/etcd-io/bbolt)
[![Godoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://godoc.org/github.com/etcd-io/bbolt)
[![Releases](https://img.shields.io/github/release/etcd-io/bbolt/all.svg?style=flat-square)](https://github.com/etcd-io/bbolt/releases)
[![LICENSE](https://img.shields.io/github/license/etcd-io/bbolt.svg?style=flat-square)](https://github.com/etcd-io/bbolt/blob/master/LICENSE)

bbolt is a fork of [Ben Johnson's][gh_ben] [Bolt][bolt] key/value
store. The purpose of this fork is to provide the Go community with an active
maintenance and development target for Bolt; the goal is improved reliability
and stability. bbolt includes bug fixes, performance enhancements, and features
not found in Bolt while preserving backwards compatibility with the Bolt API.

Bolt is a pure Go key/value store inspired by [Howard Chu's][hyc_symas]
[LMDB project][lmdb]. The goal of the project is to provide a simple,
@@ -10,6 +23,8 @@ Since Bolt is meant to be used as such a low-level piece of functionality,
simplicity is key. The API will be small and only focus on getting values
and setting values. That's it.

[gh_ben]: https://github.com/benbjohnson
[bolt]: https://github.com/boltdb/bolt
[hyc_symas]: https://twitter.com/hyc_symas
[lmdb]: http://symas.com/mdb/

@@ -21,36 +36,42 @@ consistency and thread safety. Bolt is currently used in high-load production
environments serving databases as large as 1TB. Many companies such as
Shopify and Heroku use Bolt-backed services every day.

## Project versioning

bbolt uses [semantic versioning](http://semver.org).
API should not change between patch and minor releases.
New minor versions may add additional features to the API.

## Table of Contents

- [Getting Started](#getting-started)
  - [Installing](#installing)
  - [Opening a database](#opening-a-database)
  - [Transactions](#transactions)
    - [Read-write transactions](#read-write-transactions)
    - [Read-only transactions](#read-only-transactions)
    - [Batch read-write transactions](#batch-read-write-transactions)
    - [Managing transactions manually](#managing-transactions-manually)
  - [Using buckets](#using-buckets)
  - [Using key/value pairs](#using-keyvalue-pairs)
  - [Autoincrementing integer for the bucket](#autoincrementing-integer-for-the-bucket)
  - [Iterating over keys](#iterating-over-keys)
    - [Prefix scans](#prefix-scans)
    - [Range scans](#range-scans)
    - [ForEach()](#foreach)
  - [Nested buckets](#nested-buckets)
  - [Database backups](#database-backups)
  - [Statistics](#statistics)
  - [Read-Only Mode](#read-only-mode)
  - [Mobile Use (iOS/Android)](#mobile-use-iosandroid)
- [Resources](#resources)
- [Comparison with other databases](#comparison-with-other-databases)
  - [Postgres, MySQL, & other relational databases](#postgres-mysql--other-relational-databases)
  - [LevelDB, RocksDB](#leveldb-rocksdb)
  - [LMDB](#lmdb)
- [Caveats & Limitations](#caveats--limitations)
- [Reading the Source](#reading-the-source)
- [Other Projects Using Bolt](#other-projects-using-bolt)

## Getting Started

@@ -59,13 +80,28 @@ Shopify and Heroku use Bolt-backed services every day.
To start using Bolt, install Go and run `go get`:

```sh
$ go get github.com/boltdb/bolt/...
$ go get go.etcd.io/bbolt/...
```

This will retrieve the library and install the `bolt` command line utility into
your `$GOBIN` path.


### Importing bbolt

To use bbolt as an embedded key-value store, import as:

```go
import bolt "go.etcd.io/bbolt"

db, err := bolt.Open(path, 0666, nil)
if err != nil {
	return err
}
defer db.Close()
```
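Once the handle is open, the typical next step (shown later in the full README) is a managed read-write transaction; a minimal sketch using the same `bolt` alias, with bucket and key names chosen only for illustration:

```go
err = db.Update(func(tx *bolt.Tx) error {
	// Buckets are created lazily; Put stores a key/value pair.
	b, err := tx.CreateBucketIfNotExists([]byte("widgets"))
	if err != nil {
		return err
	}
	return b.Put([]byte("answer"), []byte("42"))
})
```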


### Opening a database

The top-level object in Bolt is a `DB`. It is represented as a single file on
@@ -79,7 +115,7 @@ package main
import (
	"log"

	"github.com/boltdb/bolt"
	bolt "go.etcd.io/bbolt"
)

func main() {
@@ -522,7 +558,7 @@ this from a read-only transaction, it will perform a hot backup and not block
your other database reads and writes.

By default, it will use a regular file handle which will utilize the operating
system's page cache. See the [`Tx`](https://godoc.org/github.com/boltdb/bolt#Tx)
system's page cache. See the [`Tx`](https://godoc.org/go.etcd.io/bbolt#Tx)
documentation for information about optimizing for larger-than-RAM datasets.

One common use case is to backup over HTTP so you can use tools like `cURL` to
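The hunk is elided here, but the full README continues with an HTTP backup handler built on that idea; a hedged sketch of the pattern (`Tx.WriteTo` and `Tx.Size` are real bbolt API; the handler name, route, and filename are illustrative):

```go
func BackupHandleFunc(w http.ResponseWriter, req *http.Request) {
	err := db.View(func(tx *bolt.Tx) error {
		// Stream a consistent snapshot of the database to the client.
		w.Header().Set("Content-Type", "application/octet-stream")
		w.Header().Set("Content-Disposition", `attachment; filename="my.db"`)
		w.Header().Set("Content-Length", strconv.Itoa(int(tx.Size())))
		_, err := tx.WriteTo(w)
		return err
	})
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
	}
}
```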

@@ -811,7 +847,7 @@ Here are a few things to note when evaluating and using Bolt:

## Reading the Source

Bolt is a relatively small code base (<3KLOC) for an embedded, serializable,
Bolt is a relatively small code base (<5KLOC) for an embedded, serializable,
transactional key/value database so it can be a good starting point for people
interested in how databases work.

@@ -863,54 +899,56 @@ them via pull request.

Below is a list of public, open source projects that use Bolt:

* [BoltDbWeb](https://github.com/evnix/boltdbweb) - A web based GUI for BoltDB files.
* [Operation Go: A Routine Mission](http://gocode.io) - An online programming game for Golang using Bolt for user accounts and a leaderboard.
* [Algernon](https://github.com/xyproto/algernon) - An HTTP/2 web server with built-in support for Lua. Uses BoltDB as the default database backend.
* [Bazil](https://bazil.org/) - A file system that lets your data reside where it is most convenient for it to reside.
* [DVID](https://github.com/janelia-flyem/dvid) - Added Bolt as optional storage engine and testing it against Basho-tuned leveldb.
* [Skybox Analytics](https://github.com/skybox/skybox) - A standalone funnel analysis tool for web analytics.
* [Scuttlebutt](https://github.com/benbjohnson/scuttlebutt) - Uses Bolt to store and process all Twitter mentions of GitHub projects.
* [Wiki](https://github.com/peterhellberg/wiki) - A tiny wiki using Goji, BoltDB and Blackfriday.
* [ChainStore](https://github.com/pressly/chainstore) - Simple key-value interface to a variety of storage engines organized as a chain of operations.
* [MetricBase](https://github.com/msiebuhr/MetricBase) - Single-binary version of Graphite.
* [Gitchain](https://github.com/gitchain/gitchain) - Decentralized, peer-to-peer Git repositories aka "Git meets Bitcoin".
* [event-shuttle](https://github.com/sclasen/event-shuttle) - A Unix system service to collect and reliably deliver messages to Kafka.
* [ipxed](https://github.com/kelseyhightower/ipxed) - Web interface and api for ipxed.
* [bolter](https://github.com/hasit/bolter) - Command-line app for viewing BoltDB file in your terminal.
* [boltcli](https://github.com/spacewander/boltcli) - the redis-cli for boltdb with Lua script support.
* [BoltHold](https://github.com/timshannon/bolthold) - An embeddable NoSQL store for Go types built on BoltDB
* [BoltStore](https://github.com/yosssi/boltstore) - Session store using Bolt.
* [photosite/session](https://godoc.org/bitbucket.org/kardianos/photosite/session) - Sessions for a photo viewing site.
* [LedisDB](https://github.com/siddontang/ledisdb) - A high performance NoSQL, using Bolt as optional storage.
* [ipLocator](https://github.com/AndreasBriese/ipLocator) - A fast ip-geo-location-server using bolt with bloom filters.
* [cayley](https://github.com/google/cayley) - Cayley is an open-source graph database using Bolt as optional backend.
* [Boltdb Boilerplate](https://github.com/bobintornado/boltdb-boilerplate) - Boilerplate wrapper around bolt aiming to make simple calls one-liners.
* [BoltDbWeb](https://github.com/evnix/boltdbweb) - A web based GUI for BoltDB files.
* [bleve](http://www.blevesearch.com/) - A pure Go search engine similar to ElasticSearch that uses Bolt as the default storage backend.
* [tentacool](https://github.com/optiflows/tentacool) - REST api server to manage system stuff (IP, DNS, Gateway...) on a linux server.
* [Seaweed File System](https://github.com/chrislusf/seaweedfs) - Highly scalable distributed key~file system with O(1) disk read.
* [InfluxDB](https://influxdata.com) - Scalable datastore for metrics, events, and real-time analytics.
* [Freehold](http://tshannon.bitbucket.org/freehold/) - An open, secure, and lightweight platform for your files and data.
* [Prometheus Annotation Server](https://github.com/oliver006/prom_annotation_server) - Annotation server for PromDash & Prometheus service monitoring system.
* [Consul](https://github.com/hashicorp/consul) - Consul is service discovery and configuration made easy. Distributed, highly available, and datacenter-aware.
* [Kala](https://github.com/ajvb/kala) - Kala is a modern job scheduler optimized to run on a single node. It is persistent, JSON over HTTP API, ISO 8601 duration notation, and dependent jobs.
* [drive](https://github.com/odeke-em/drive) - drive is an unofficial Google Drive command line client for \*NIX operating systems.
* [stow](https://github.com/djherbis/stow) - a persistence manager for objects
  backed by boltdb.
* [btcwallet](https://github.com/btcsuite/btcwallet) - A bitcoin wallet.
* [buckets](https://github.com/joyrexus/buckets) - a bolt wrapper streamlining
  simple tx and key scans.
* [mbuckets](https://github.com/abhigupta912/mbuckets) - A Bolt wrapper that allows easy operations on multi level (nested) buckets.
* [Request Baskets](https://github.com/darklynx/request-baskets) - A web service to collect arbitrary HTTP requests and inspect them via REST API or simple web UI, similar to [RequestBin](http://requestb.in/) service
* [Go Report Card](https://goreportcard.com/) - Go code quality report cards as a (free and open source) service.
* [Boltdb Boilerplate](https://github.com/bobintornado/boltdb-boilerplate) - Boilerplate wrapper around bolt aiming to make simple calls one-liners.
* [lru](https://github.com/crowdriff/lru) - Easy to use Bolt-backed Least-Recently-Used (LRU) read-through cache with chainable remote stores.
* [Storm](https://github.com/asdine/storm) - Simple and powerful ORM for BoltDB.
* [GoWebApp](https://github.com/josephspurrier/gowebapp) - A basic MVC web application in Go using BoltDB.
* [SimpleBolt](https://github.com/xyproto/simplebolt) - A simple way to use BoltDB. Deals mainly with strings.
* [Algernon](https://github.com/xyproto/algernon) - An HTTP/2 web server with built-in support for Lua. Uses BoltDB as the default database backend.
* [MuLiFS](https://github.com/dankomiocevic/mulifs) - Music Library Filesystem creates a filesystem to organise your music files.
* [GoShort](https://github.com/pankajkhairnar/goShort) - GoShort is a URL shortener written in Golang and BoltDB for persistent key/value storage and for routing it's using high performant HTTPRouter.
* [torrent](https://github.com/anacrolix/torrent) - Full-featured BitTorrent client package and utilities in Go. BoltDB is a storage backend in development.
* [gopherpit](https://github.com/gopherpit/gopherpit) - A web service to manage Go remote import paths with custom domains
* [bolter](https://github.com/hasit/bolter) - Command-line app for viewing BoltDB file in your terminal.
* [btcwallet](https://github.com/btcsuite/btcwallet) - A bitcoin wallet.
* [cayley](https://github.com/google/cayley) - Cayley is an open-source graph database using Bolt as optional backend.
* [ChainStore](https://github.com/pressly/chainstore) - Simple key-value interface to a variety of storage engines organized as a chain of operations.
* [Consul](https://github.com/hashicorp/consul) - Consul is service discovery and configuration made easy. Distributed, highly available, and datacenter-aware.
* [DVID](https://github.com/janelia-flyem/dvid) - Added Bolt as optional storage engine and testing it against Basho-tuned leveldb.
* [dcrwallet](https://github.com/decred/dcrwallet) - A wallet for the Decred cryptocurrency.
* [drive](https://github.com/odeke-em/drive) - drive is an unofficial Google Drive command line client for \*NIX operating systems.
* [event-shuttle](https://github.com/sclasen/event-shuttle) - A Unix system service to collect and reliably deliver messages to Kafka.
* [Freehold](http://tshannon.bitbucket.org/freehold/) - An open, secure, and lightweight platform for your files and data.
* [Go Report Card](https://goreportcard.com/) - Go code quality report cards as a (free and open source) service.
* [GoWebApp](https://github.com/josephspurrier/gowebapp) - A basic MVC web application in Go using BoltDB.
* [GoShort](https://github.com/pankajkhairnar/goShort) - GoShort is a URL shortener written in Golang and BoltDB for persistent key/value storage and for routing it's using high performant HTTPRouter.
* [gopherpit](https://github.com/gopherpit/gopherpit) - A web service to manage Go remote import paths with custom domains
* [Gitchain](https://github.com/gitchain/gitchain) - Decentralized, peer-to-peer Git repositories aka "Git meets Bitcoin".
* [InfluxDB](https://influxdata.com) - Scalable datastore for metrics, events, and real-time analytics.
* [ipLocator](https://github.com/AndreasBriese/ipLocator) - A fast ip-geo-location-server using bolt with bloom filters.
* [ipxed](https://github.com/kelseyhightower/ipxed) - Web interface and api for ipxed.
* [Ironsmith](https://github.com/timshannon/ironsmith) - A simple, script-driven continuous integration (build -> test -> release) tool, with no external dependencies
* [BoltHold](https://github.com/timshannon/bolthold) - An embeddable NoSQL store for Go types built on BoltDB
* [Ponzu CMS](https://ponzu-cms.org) - Headless CMS + automatic JSON API with auto-HTTPS, HTTP/2 Server Push, and flexible server framework.
* [Kala](https://github.com/ajvb/kala) - Kala is a modern job scheduler optimized to run on a single node. It is persistent, JSON over HTTP API, ISO 8601 duration notation, and dependent jobs.
* [Key Value Access Language (KVAL)](https://github.com/kval-access-language) - A proposed grammar for key-value datastores offering a bbolt binding.
* [LedisDB](https://github.com/siddontang/ledisdb) - A high performance NoSQL, using Bolt as optional storage.
* [lru](https://github.com/crowdriff/lru) - Easy to use Bolt-backed Least-Recently-Used (LRU) read-through cache with chainable remote stores.
* [mbuckets](https://github.com/abhigupta912/mbuckets) - A Bolt wrapper that allows easy operations on multi level (nested) buckets.
* [MetricBase](https://github.com/msiebuhr/MetricBase) - Single-binary version of Graphite.
* [MuLiFS](https://github.com/dankomiocevic/mulifs) - Music Library Filesystem creates a filesystem to organise your music files.
* [Operation Go: A Routine Mission](http://gocode.io) - An online programming game for Golang using Bolt for user accounts and a leaderboard.
* [photosite/session](https://godoc.org/bitbucket.org/kardianos/photosite/session) - Sessions for a photo viewing site.
* [Prometheus Annotation Server](https://github.com/oliver006/prom_annotation_server) - Annotation server for PromDash & Prometheus service monitoring system.
* [reef-pi](https://github.com/reef-pi/reef-pi) - reef-pi is an award winning, modular, DIY reef tank controller using easy to learn electronics based on a Raspberry Pi.
* [Request Baskets](https://github.com/darklynx/request-baskets) - A web service to collect arbitrary HTTP requests and inspect them via REST API or simple web UI, similar to [RequestBin](http://requestb.in/) service
* [Seaweed File System](https://github.com/chrislusf/seaweedfs) - Highly scalable distributed key~file system with O(1) disk read.
* [stow](https://github.com/djherbis/stow) - a persistence manager for objects
  backed by boltdb.
* [Storm](https://github.com/asdine/storm) - Simple and powerful ORM for BoltDB.
* [SimpleBolt](https://github.com/xyproto/simplebolt) - A simple way to use BoltDB. Deals mainly with strings.
* [Skybox Analytics](https://github.com/skybox/skybox) - A standalone funnel analysis tool for web analytics.
* [Scuttlebutt](https://github.com/benbjohnson/scuttlebutt) - Uses Bolt to store and process all Twitter mentions of GitHub projects.
* [tentacool](https://github.com/optiflows/tentacool) - REST api server to manage system stuff (IP, DNS, Gateway...) on a linux server.
* [torrent](https://github.com/anacrolix/torrent) - Full-featured BitTorrent client package and utilities in Go. BoltDB is a storage backend in development.
* [Wiki](https://github.com/peterhellberg/wiki) - A tiny wiki using Goji, BoltDB and Blackfriday.

If you are using Bolt in a project please send a pull request to add it to the list.
@@ -1,4 +1,4 @@
package bolt
package bbolt

// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0x7FFFFFFF // 2GB
@@ -1,4 +1,4 @@
package bolt
package bbolt

// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0xFFFFFFFFFFFF // 256TB
@@ -1,4 +1,4 @@
package bolt
package bbolt

import "unsafe"

@@ -1,6 +1,6 @@
// +build arm64

package bolt
package bbolt

// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0xFFFFFFFFFFFF // 256TB
@@ -1,4 +1,4 @@
package bolt
package bbolt

import (
	"syscall"

12 vendor/github.com/etcd-io/bbolt/bolt_mips64x.go generated vendored Normal file
@@ -0,0 +1,12 @@
// +build mips64 mips64le

package bbolt

// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0x8000000000 // 512GB

// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF

// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false

12 vendor/github.com/etcd-io/bbolt/bolt_mipsx.go generated vendored Normal file
@@ -0,0 +1,12 @@
// +build mips mipsle

package bbolt

// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0x40000000 // 1GB

// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0xFFFFFFF

// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false
@@ -1,4 +1,4 @@
package bolt
package bbolt

import (
	"syscall"
@@ -1,9 +1,12 @@
// +build ppc

package bolt
package bbolt

// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0x7FFFFFFF // 2GB

// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0xFFFFFFF

// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false
@@ -1,6 +1,6 @@
// +build ppc64

package bolt
package bbolt

// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0xFFFFFFFFFFFF // 256TB
Some files were not shown because too many files have changed in this diff.