Mirror of https://github.com/containers/skopeo.git, synced 2025-04-28 03:10:18 +00:00

Commit b329dd0d4e: Merge pull request #600 from nalind/storage-multiple-manifests

Vendor latest github.com/containers/storage

@@ -1,3 +1,4 @@
github.com/urfave/cli v1.20.0
github.com/kr/pretty v0.1.0
github.com/kr/text v0.1.0

2 vendor/github.com/containers/storage/containers_ffjson.go generated vendored
@@ -1,5 +1,5 @@
// Code generated by ffjson <https://github.com/pquerna/ffjson>. DO NOT EDIT.
// source: containers.go
// source: ./containers.go

package storage

5 vendor/github.com/containers/storage/drivers/aufs/aufs.go generated vendored
@@ -253,6 +253,11 @@ func (a *Driver) AdditionalImageStores() []string {
return nil
}

// CreateFromTemplate creates a layer with the same contents and parent as another layer.
func (a *Driver) CreateFromTemplate(id, template string, templateIDMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, opts *graphdriver.CreateOpts, readWrite bool) error {
return graphdriver.NaiveCreateFromTemplate(a, id, template, templateIDMappings, parent, parentIDMappings, opts, readWrite)
}

// CreateReadWrite creates a layer that is writable for use as a container
// file system.
func (a *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error {

5 vendor/github.com/containers/storage/drivers/btrfs/btrfs.go generated vendored
@@ -490,6 +490,11 @@ func (d *Driver) quotasDirID(id string) string {
return path.Join(d.quotasDir(), id)
}

// CreateFromTemplate creates a layer with the same contents and parent as another layer.
func (d *Driver) CreateFromTemplate(id, template string, templateIDMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, opts *graphdriver.CreateOpts, readWrite bool) error {
return d.Create(id, template, opts)
}

// CreateReadWrite creates a layer that is writable for use as a container
// file system.
func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error {

5 vendor/github.com/containers/storage/drivers/devmapper/driver.go generated vendored
@@ -123,6 +123,11 @@ func (d *Driver) Cleanup() error {
return err
}

// CreateFromTemplate creates a layer with the same contents and parent as another layer.
func (d *Driver) CreateFromTemplate(id, template string, templateIDMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, opts *graphdriver.CreateOpts, readWrite bool) error {
return d.Create(id, template, opts)
}

// CreateReadWrite creates a layer that is writable for use as a container
// file system.
func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error {

3 vendor/github.com/containers/storage/drivers/driver.go generated vendored
@@ -72,6 +72,9 @@ type ProtoDriver interface {
// specified id and parent and options passed in opts. Parent
// may be "" and opts may be nil.
Create(id, parent string, opts *CreateOpts) error
// CreateFromTemplate creates a new filesystem layer with the specified id
// and parent, with contents identical to the specified template layer.
CreateFromTemplate(id, template string, templateIDMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, opts *CreateOpts, readWrite bool) error
// Remove attempts to remove the filesystem layer with this id.
Remove(id string) error
// Get returns the mountpoint for the layered filesystem referred

66 vendor/github.com/containers/storage/drivers/overlay/check.go generated vendored
@@ -10,6 +10,8 @@ import (
"path/filepath"
"syscall"

"github.com/containers/storage/pkg/ioutils"
"github.com/containers/storage/pkg/mount"
"github.com/containers/storage/pkg/system"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@@ -57,10 +59,11 @@ func doesSupportNativeDiff(d, mountOpts string) error {
}

opts := fmt.Sprintf("lowerdir=%s:%s,upperdir=%s,workdir=%s", path.Join(td, "l2"), path.Join(td, "l1"), path.Join(td, "l3"), path.Join(td, "work"))
if mountOpts != "" {
opts = fmt.Sprintf("%s,%s", opts, mountOpts)
flags, data := mount.ParseOptions(mountOpts)
if data != "" {
opts = fmt.Sprintf("%s,%s", opts, data)
}
if err := unix.Mount("overlay", filepath.Join(td, "merged"), "overlay", 0, opts); err != nil {
if err := unix.Mount("overlay", filepath.Join(td, "merged"), "overlay", uintptr(flags), opts); err != nil {
return errors.Wrap(err, "failed to mount overlay")
}
defer func() {
@@ -103,3 +106,60 @@ func doesSupportNativeDiff(d, mountOpts string) error {

return nil
}

// doesMetacopy checks if the filesystem is going to optimize changes to
// metadata by using nodes marked with an "overlay.metacopy" attribute to avoid
// copying up a file from a lower layer unless/until its contents are being
// modified
func doesMetacopy(d, mountOpts string) (bool, error) {
td, err := ioutil.TempDir(d, "metacopy-check")
if err != nil {
return false, err
}
defer func() {
if err := os.RemoveAll(td); err != nil {
logrus.Warnf("Failed to remove check directory %v: %v", td, err)
}
}()

// Make directories l1, l2, work, merged
if err := os.MkdirAll(filepath.Join(td, "l1"), 0755); err != nil {
return false, err
}
if err := ioutils.AtomicWriteFile(filepath.Join(td, "l1", "f"), []byte{0xff}, 0700); err != nil {
return false, err
}
if err := os.MkdirAll(filepath.Join(td, "l2"), 0755); err != nil {
return false, err
}
if err := os.Mkdir(filepath.Join(td, "work"), 0755); err != nil {
return false, err
}
if err := os.Mkdir(filepath.Join(td, "merged"), 0755); err != nil {
return false, err
}
// Mount using the mandatory options and configured options
opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", path.Join(td, "l1"), path.Join(td, "l2"), path.Join(td, "work"))
flags, data := mount.ParseOptions(mountOpts)
if data != "" {
opts = fmt.Sprintf("%s,%s", opts, data)
}
if err := unix.Mount("overlay", filepath.Join(td, "merged"), "overlay", uintptr(flags), opts); err != nil {
return false, errors.Wrap(err, "failed to mount overlay for metacopy check")
}
defer func() {
if err := unix.Unmount(filepath.Join(td, "merged"), 0); err != nil {
logrus.Warnf("Failed to unmount check directory %v: %v", filepath.Join(td, "merged"), err)
}
}()
// Make a change that only impacts the inode, and check if the pulled-up copy is marked
// as a metadata-only copy
if err := os.Chmod(filepath.Join(td, "merged", "f"), 0600); err != nil {
return false, errors.Wrap(err, "error changing permissions on file for metacopy check")
}
metacopy, err := system.Lgetxattr(filepath.Join(td, "l2", "f"), "trusted.overlay.metacopy")
if err != nil {
return false, errors.Wrap(err, "metacopy flag was not set on file in upper layer")
}
return metacopy != nil, nil
}

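The check above splits any configured mount options into flag bits and data options before calling mount(2). A minimal sketch of that split, assuming the same vendored packages; the helper name mountOverlayWithOptions, the directory arguments, and the example options are illustrative, while mount.ParseOptions and unix.Mount are used the way this hunk uses them.

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/mount"
	"golang.org/x/sys/unix"
)

// mountOverlayWithOptions mounts an overlay filesystem at mergedDir. User-supplied
// options are split into flag bits (e.g. "noatime"), which become the mount(2)
// flags argument, and data options (e.g. "metacopy=on"), which are appended to
// the lowerdir/upperdir/workdir string.
func mountOverlayWithOptions(lowerDir, upperDir, workDir, mergedDir, mountOpts string) error {
	opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lowerDir, upperDir, workDir)
	flags, data := mount.ParseOptions(mountOpts)
	if data != "" {
		opts = fmt.Sprintf("%s,%s", opts, data)
	}
	return unix.Mount("overlay", mergedDir, "overlay", uintptr(flags), opts)
}

func main() {
	// Example call; the directories are placeholders and must already exist.
	if err := mountOverlayWithOptions("/tmp/l1", "/tmp/l2", "/tmp/work", "/tmp/merged", "noatime,metacopy=on"); err != nil {
		fmt.Println("mount failed:", err)
	}
}
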
57 vendor/github.com/containers/storage/drivers/overlay/overlay.go generated vendored
@@ -14,6 +14,7 @@ import (
"strconv"
"strings"
"sync"
"syscall"

"github.com/containers/storage/drivers"
"github.com/containers/storage/drivers/overlayutils"
@@ -84,13 +85,12 @@ const (
)

type overlayOptions struct {
overrideKernelCheck bool
imageStores []string
quota quota.Quota
mountProgram string
ostreeRepo string
skipMountHome bool
mountOptions string
imageStores []string
quota quota.Quota
mountProgram string
ostreeRepo string
skipMountHome bool
mountOptions string
}

// Driver contains information about the home directory and the list of active mounts that are created using this driver.
@@ -104,6 +104,7 @@ type Driver struct {
options overlayOptions
naiveDiff graphdriver.DiffDriver
supportsDType bool
usingMetacopy bool
locker *locker.Locker
convert map[string]bool
}
@@ -157,6 +158,7 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap
return nil, err
}

var usingMetacopy bool
var supportsDType bool
if opts.mountProgram != "" {
supportsDType = true
@@ -165,8 +167,23 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap
if err != nil {
os.Remove(filepath.Join(home, linkDir))
os.Remove(home)
patherr, ok := err.(*os.PathError)
if ok && patherr.Err == syscall.ENOSPC {
return nil, err
}
return nil, errors.Wrap(err, "kernel does not support overlay fs")
}
usingMetacopy, err = doesMetacopy(home, opts.mountOptions)
if err == nil {
if usingMetacopy {
logrus.Debugf("overlay test mount indicated that metacopy is being used")
} else {
logrus.Debugf("overlay test mount indicated that metacopy is not being used")
}
} else {
logrus.Warnf("overlay test mount did not indicate whether or not metacopy is being used: %v", err)
return nil, err
}
}

if !opts.skipMountHome {
@@ -188,6 +205,7 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap
gidMaps: gidMaps,
ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicOverlay)),
supportsDType: supportsDType,
usingMetacopy: usingMetacopy,
locker: locker.New(),
options: *opts,
convert: make(map[string]bool),
@@ -207,7 +225,7 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap
return nil, fmt.Errorf("Storage option overlay.size only supported for backingFS XFS. Found %v", backingFs)
}

logrus.Debugf("backingFs=%s, projectQuotaSupported=%v, useNativeDiff=%v", backingFs, projectQuotaSupported, !d.useNaiveDiff())
logrus.Debugf("backingFs=%s, projectQuotaSupported=%v, useNativeDiff=%v, usingMetacopy=%v", backingFs, projectQuotaSupported, !d.useNaiveDiff(), d.usingMetacopy)

return d, nil
}
@@ -221,12 +239,6 @@ func parseOptions(options []string) (*overlayOptions, error) {
}
key = strings.ToLower(key)
switch key {
case ".override_kernel_check", "overlay.override_kernel_check", "overlay2.override_kernel_check":
logrus.Debugf("overlay: override_kernelcheck=%s", val)
o.overrideKernelCheck, err = strconv.ParseBool(val)
if err != nil {
return nil, err
}
case ".mountopt", "overlay.mountopt", "overlay2.mountopt":
o.mountOptions = val
case ".size", "overlay.size", "overlay2.size":
@@ -285,6 +297,12 @@ func supportsOverlay(home string, homeMagic graphdriver.FsMagic, rootUID, rootGI
exec.Command("modprobe", "overlay").Run()

layerDir, err := ioutil.TempDir(home, "compat")
if err != nil {
patherr, ok := err.(*os.PathError)
if ok && patherr.Err == syscall.ENOSPC {
return false, err
}
}
if err == nil {
// Check if reading the directory's contents populates the d_type field, which is required
// for proper operation of the overlay filesystem.
@@ -364,6 +382,7 @@ func (d *Driver) Status() [][2]string {
{"Backing Filesystem", backingFs},
{"Supports d_type", strconv.FormatBool(d.supportsDType)},
{"Native Overlay Diff", strconv.FormatBool(!d.useNaiveDiff())},
{"Using metacopy", strconv.FormatBool(d.usingMetacopy)},
}
}

@@ -399,6 +418,14 @@ func (d *Driver) Cleanup() error {
return mount.Unmount(d.home)
}

// CreateFromTemplate creates a layer with the same contents and parent as another layer.
func (d *Driver) CreateFromTemplate(id, template string, templateIDMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, opts *graphdriver.CreateOpts, readWrite bool) error {
if readWrite {
return d.CreateReadWrite(id, template, opts)
}
return d.Create(id, template, opts)
}

// CreateReadWrite creates a layer that is writable for use as a container
// file system.
func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error {
@@ -782,6 +809,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
mountTarget = path.Join(id, "merged")
}
flags, data := mount.ParseOptions(mountData)
logrus.Debugf("overlay: mount_data=%s", mountData)
if err := mountFunc("overlay", mountTarget, "overlay", uintptr(flags), data); err != nil {
return "", fmt.Errorf("error creating overlay mount to %s: %v", mountTarget, err)
}
@@ -975,6 +1003,7 @@ func (d *Driver) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMapp
// Mount the new layer and handle ownership changes and possible copy_ups in it.
options := graphdriver.MountOpts{
MountLabel: mountLabel,
Options: strings.Split(d.options.mountOptions, ","),
}
layerFs, err := d.get(id, true, options)
if err != nil {

45 vendor/github.com/containers/storage/drivers/template.go generated vendored Normal file
@@ -0,0 +1,45 @@
package graphdriver

import (
"github.com/sirupsen/logrus"

"github.com/containers/storage/pkg/idtools"
)

// TemplateDriver is just barely enough of a driver that we can implement a
// naive version of CreateFromTemplate on top of it.
type TemplateDriver interface {
DiffDriver
CreateReadWrite(id, parent string, opts *CreateOpts) error
Create(id, parent string, opts *CreateOpts) error
Remove(id string) error
}

// CreateFromTemplate creates a layer with the same contents and parent as
// another layer. Internally, it may even depend on that other layer
// continuing to exist, as if it were actually a child of the child layer.
func NaiveCreateFromTemplate(d TemplateDriver, id, template string, templateIDMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, opts *CreateOpts, readWrite bool) error {
var err error
if readWrite {
err = d.CreateReadWrite(id, parent, opts)
} else {
err = d.Create(id, parent, opts)
}
if err != nil {
return err
}
diff, err := d.Diff(template, templateIDMappings, parent, parentIDMappings, opts.MountLabel)
if err != nil {
if err2 := d.Remove(id); err2 != nil {
logrus.Errorf("error removing layer %q: %v", id, err2)
}
return err
}
if _, err = d.ApplyDiff(id, templateIDMappings, parent, opts.MountLabel, diff); err != nil {
if err2 := d.Remove(id); err2 != nil {
logrus.Errorf("error removing layer %q: %v", id, err2)
}
return err
}
return nil
}

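NaiveCreateFromTemplate builds the new layer by creating it and then applying the template layer's diff on top; drivers with no cheaper native path (aufs and windows in this diff) simply delegate to it. Below is a minimal sketch of that delegation for a hypothetical out-of-tree driver, assuming the embedded value already satisfies graphdriver.TemplateDriver; only graphdriver.NaiveCreateFromTemplate and the import paths come from the vendored code, everything else is illustrative.

package mydriver

import (
	graphdriver "github.com/containers/storage/drivers"
	"github.com/containers/storage/pkg/idtools"
)

// Driver is a hypothetical graph driver. It embeds an existing
// graphdriver.TemplateDriver (something providing Diff, ApplyDiff, Create,
// CreateReadWrite, and Remove) and only adds the method the updated
// ProtoDriver interface now requires.
type Driver struct {
	graphdriver.TemplateDriver
}

// CreateFromTemplate satisfies the new interface requirement by falling back
// to the generic copy-the-diff implementation from drivers/template.go.
func (d *Driver) CreateFromTemplate(id, template string, templateIDMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, opts *graphdriver.CreateOpts, readWrite bool) error {
	return graphdriver.NaiveCreateFromTemplate(d, id, template, templateIDMappings, parent, parentIDMappings, opts, readWrite)
}
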
8 vendor/github.com/containers/storage/drivers/vfs/driver.go generated vendored
@@ -99,6 +99,14 @@ func (d *Driver) Cleanup() error {
return nil
}

// CreateFromTemplate creates a layer with the same contents and parent as another layer.
func (d *Driver) CreateFromTemplate(id, template string, templateIDMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, opts *graphdriver.CreateOpts, readWrite bool) error {
if readWrite {
return d.CreateReadWrite(id, template, opts)
}
return d.Create(id, template, opts)
}

// CreateReadWrite creates a layer that is writable for use as a container
// file system.
func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error {

5 vendor/github.com/containers/storage/drivers/windows/windows.go generated vendored
@@ -185,6 +185,11 @@ func (d *Driver) Exists(id string) bool {
return result
}

// CreateFromTemplate creates a layer with the same contents and parent as another layer.
func (d *Driver) CreateFromTemplate(id, template string, templateIDMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, opts *graphdriver.CreateOpts, readWrite bool) error {
return graphdriver.NaiveCreateFromTemplate(d, id, template, templateIDMappings, parent, parentIDMappings, opts, readWrite)
}

// CreateReadWrite creates a layer that is writable for use as a container
// file system.
func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error {

67 vendor/github.com/containers/storage/drivers/zfs/zfs.go generated vendored
@@ -1,4 +1,4 @@
// +build linux freebsd solaris
// +build linux freebsd

package zfs

@@ -16,7 +16,7 @@ import (
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/mount"
"github.com/containers/storage/pkg/parsers"
zfs "github.com/mistifyio/go-zfs"
"github.com/mistifyio/go-zfs"
"github.com/opencontainers/selinux/go-selinux/label"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@@ -38,7 +38,7 @@ type Logger struct{}

// Log wraps log message from ZFS driver with a prefix '[zfs]'.
func (*Logger) Log(cmd []string) {
logrus.Debugf("[zfs] %s", strings.Join(cmd, " "))
logrus.WithField("storage-driver", "zfs").Debugf("%s", strings.Join(cmd, " "))
}

// Init returns a new ZFS driver.
@@ -47,14 +47,16 @@ func (*Logger) Log(cmd []string) {
func Init(base string, opt []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) {
var err error

logger := logrus.WithField("storage-driver", "zfs")

if _, err := exec.LookPath("zfs"); err != nil {
logrus.Debugf("[zfs] zfs command is not available: %v", err)
logger.Debugf("zfs command is not available: %v", err)
return nil, errors.Wrap(graphdriver.ErrPrerequisites, "the 'zfs' command is not available")
}

file, err := os.OpenFile("/dev/zfs", os.O_RDWR, 0600)
if err != nil {
logrus.Debugf("[zfs] cannot open /dev/zfs: %v", err)
logger.Debugf("cannot open /dev/zfs: %v", err)
return nil, errors.Wrapf(graphdriver.ErrPrerequisites, "could not open /dev/zfs: %v", err)
}
defer file.Close()
@@ -109,9 +111,6 @@ func Init(base string, opt []string, uidMaps, gidMaps []idtools.IDMap) (graphdri
return nil, fmt.Errorf("Failed to create '%s': %v", base, err)
}

if err := mount.MakePrivate(base); err != nil {
return nil, err
}
d := &Driver{
dataset: rootDataset,
options: options,
@@ -157,7 +156,7 @@ func lookupZfsDataset(rootdir string) (string, error) {
}
for _, m := range mounts {
if err := unix.Stat(m.Mountpoint, &stat); err != nil {
logrus.Debugf("[zfs] failed to stat '%s' while scanning for zfs mount: %v", m.Mountpoint, err)
logrus.WithField("storage-driver", "zfs").Debugf("failed to stat '%s' while scanning for zfs mount: %v", m.Mountpoint, err)
continue // may fail on fuse file systems
}

@@ -184,7 +183,7 @@ func (d *Driver) String() string {
return "zfs"
}

// Cleanup is used to implement graphdriver.ProtoDriver. There is no cleanup required for this driver.
// Cleanup is called on when program exits, it is a no-op for ZFS.
func (d *Driver) Cleanup() error {
return nil
}
@@ -260,6 +259,11 @@ func (d *Driver) mountPath(id string) string {
return path.Join(d.options.mountPath, "graph", getMountpoint(id))
}

// CreateFromTemplate creates a layer with the same contents and parent as another layer.
func (d *Driver) CreateFromTemplate(id, template string, templateIDMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, opts *graphdriver.CreateOpts, readWrite bool) error {
return d.Create(id, template, opts)
}

// CreateReadWrite creates a layer that is writable for use as a container
// file system.
func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error {
@@ -360,11 +364,25 @@ func (d *Driver) Remove(id string) error {
}

// Get returns the mountpoint for the given id after creating the target directories if necessary.
func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) {
func (d *Driver) Get(id string, options graphdriver.MountOpts) (_ string, retErr error) {

mountpoint := d.mountPath(id)
if count := d.ctr.Increment(mountpoint); count > 1 {
return mountpoint, nil
}
defer func() {
if retErr != nil {
if c := d.ctr.Decrement(mountpoint); c <= 0 {
if mntErr := unix.Unmount(mountpoint, 0); mntErr != nil {
logrus.WithField("storage-driver", "zfs").Errorf("Error unmounting %v: %v", mountpoint, mntErr)
}
if rmErr := unix.Rmdir(mountpoint); rmErr != nil && !os.IsNotExist(rmErr) {
logrus.WithField("storage-driver", "zfs").Debugf("Failed to remove %s: %v", id, rmErr)
}

}
}
}()

mountOptions := d.options.mountOptions
if len(options.Options) > 0 {
@@ -373,29 +391,24 @@ func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) {

filesystem := d.zfsPath(id)
opts := label.FormatMountLabel(mountOptions, options.MountLabel)
logrus.Debugf(`[zfs] mount("%s", "%s", "%s")`, filesystem, mountpoint, opts)
logrus.WithField("storage-driver", "zfs").Debugf(`mount("%s", "%s", "%s")`, filesystem, mountpoint, opts)

rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
if err != nil {
d.ctr.Decrement(mountpoint)
return "", err
}
// Create the target directories if they don't exist
if err := idtools.MkdirAllAs(mountpoint, 0755, rootUID, rootGID); err != nil {
d.ctr.Decrement(mountpoint)
return "", err
}

if err := mount.Mount(filesystem, mountpoint, "zfs", opts); err != nil {
d.ctr.Decrement(mountpoint)
return "", fmt.Errorf("error creating zfs mount of %s to %s: %v", filesystem, mountpoint, err)
return "", errors.Wrap(err, "error creating zfs mount")
}

// this could be our first mount after creation of the filesystem, and the root dir may still have root
// permissions instead of the remapped root uid:gid (if user namespaces are enabled):
if err := os.Chown(mountpoint, rootUID, rootGID); err != nil {
mount.Unmount(mountpoint)
d.ctr.Decrement(mountpoint)
return "", fmt.Errorf("error modifying zfs mountpoint (%s) directory ownership: %v", mountpoint, err)
}

@@ -408,16 +421,18 @@ func (d *Driver) Put(id string) error {
if count := d.ctr.Decrement(mountpoint); count > 0 {
return nil
}
mounted, err := graphdriver.Mounted(graphdriver.FsMagicZfs, mountpoint)
if err != nil || !mounted {
return err

logger := logrus.WithField("storage-driver", "zfs")

logger.Debugf(`unmount("%s")`, mountpoint)

if err := unix.Unmount(mountpoint, unix.MNT_DETACH); err != nil {
logger.Warnf("Failed to unmount %s mount %s: %v", id, mountpoint, err)
}
if err := unix.Rmdir(mountpoint); err != nil && !os.IsNotExist(err) {
logger.Debugf("Failed to remove %s mount point %s: %v", id, mountpoint, err)
}

logrus.Debugf(`[zfs] unmount("%s")`, mountpoint)

if err := mount.Unmount(mountpoint); err != nil {
return fmt.Errorf("error unmounting to %s: %v", mountpoint, err)
}
return nil
}

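The reworked Get uses a named return value so one deferred closure can roll back the reference-count increment (and any partial mount) whenever a later step fails. A small standalone sketch of that Go pattern, with placeholder increment/decrement/setup functions that are not taken from the vendored code:

package main

import (
	"errors"
	"fmt"
)

// getResource sketches the named-return cleanup pattern: retErr is visible to
// the deferred closure, so any failure after the counter increment triggers a
// rollback without repeating cleanup code on every error path.
func getResource(increment func() int, decrement func() int, setup func() error) (retErr error) {
	count := increment()
	if count > 1 {
		return nil // already set up by an earlier caller
	}
	defer func() {
		if retErr != nil {
			decrement() // undo the increment taken above
		}
	}()
	if err := setup(); err != nil {
		return fmt.Errorf("setup failed: %w", err)
	}
	return nil
}

func main() {
	n := 0
	inc := func() int { n++; return n }
	dec := func() int { n--; return n }
	err := getResource(inc, dec, func() error { return errors.New("boom") })
	fmt.Println(err, n) // setup failed: boom 0
}
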
2 vendor/github.com/containers/storage/drivers/zfs/zfs_freebsd.go generated vendored
@@ -18,7 +18,7 @@ func checkRootdirFs(rootdir string) error {

// on FreeBSD buf.Fstypename contains ['z', 'f', 's', 0 ... ]
if (buf.Fstypename[0] != 122) || (buf.Fstypename[1] != 102) || (buf.Fstypename[2] != 115) || (buf.Fstypename[3] != 0) {
logrus.Debugf("[zfs] no zfs dataset found for rootdir '%s'", rootdir)
logrus.WithField("storage-driver", "zfs").Debugf("no zfs dataset found for rootdir '%s'", rootdir)
return errors.Wrapf(graphdriver.ErrPrerequisites, "no zfs dataset found for rootdir '%s'", rootdir)
}

21 vendor/github.com/containers/storage/drivers/zfs/zfs_linux.go generated vendored
@@ -1,23 +1,24 @@
package zfs

import (
"fmt"

"github.com/containers/storage/drivers"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)

func checkRootdirFs(rootdir string) error {
var buf unix.Statfs_t
if err := unix.Statfs(rootdir, &buf); err != nil {
return fmt.Errorf("Failed to access '%s': %s", rootdir, err)
func checkRootdirFs(rootDir string) error {
fsMagic, err := graphdriver.GetFSMagic(rootDir)
if err != nil {
return err
}
backingFS := "unknown"
if fsName, ok := graphdriver.FsNames[fsMagic]; ok {
backingFS = fsName
}

if graphdriver.FsMagic(buf.Type) != graphdriver.FsMagicZfs {
logrus.Debugf("[zfs] no zfs dataset found for rootdir '%s'", rootdir)
return errors.Wrapf(graphdriver.ErrPrerequisites, "no zfs dataset found for rootdir '%s'", rootdir)
if fsMagic != graphdriver.FsMagicZfs {
logrus.WithField("root", rootDir).WithField("backingFS", backingFS).WithField("storage-driver", "zfs").Error("No zfs dataset found for root")
return errors.Wrapf(graphdriver.ErrPrerequisites, "no zfs dataset found for rootdir '%s'", rootDir)
}

return nil

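The rewritten checkRootdirFs resolves the backing filesystem through graphdriver.GetFSMagic and the FsNames table instead of calling statfs directly. A short Linux-only sketch of that lookup; the wrapper name backingFSName and the example path are illustrative, while GetFSMagic and FsNames are the calls the hunk above uses.

package main

import (
	"fmt"

	graphdriver "github.com/containers/storage/drivers"
)

// backingFSName reports a human-readable name for the filesystem backing dir,
// falling back to "unknown" when the magic number is not in the table.
func backingFSName(dir string) (string, error) {
	fsMagic, err := graphdriver.GetFSMagic(dir)
	if err != nil {
		return "", err
	}
	if name, ok := graphdriver.FsNames[fsMagic]; ok {
		return name, nil
	}
	return "unknown", nil
}

func main() {
	name, err := backingFSName("/var/lib/containers/storage")
	fmt.Println(name, err)
}
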
59 vendor/github.com/containers/storage/drivers/zfs/zfs_solaris.go generated vendored
@@ -1,59 +0,0 @@
// +build solaris,cgo

package zfs

/*
#include <sys/statvfs.h>
#include <stdlib.h>

static inline struct statvfs *getstatfs(char *s) {
struct statvfs *buf;
int err;
buf = (struct statvfs *)malloc(sizeof(struct statvfs));
err = statvfs(s, buf);
return buf;
}
*/
import "C"
import (
"path/filepath"
"strings"
"unsafe"

"github.com/containers/storage/drivers"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)

func checkRootdirFs(rootdir string) error {

cs := C.CString(filepath.Dir(rootdir))
defer C.free(unsafe.Pointer(cs))
buf := C.getstatfs(cs)
defer C.free(unsafe.Pointer(buf))

// on Solaris buf.f_basetype contains ['z', 'f', 's', 0 ... ]
if (buf.f_basetype[0] != 122) || (buf.f_basetype[1] != 102) || (buf.f_basetype[2] != 115) ||
(buf.f_basetype[3] != 0) {
logrus.Debugf("[zfs] no zfs dataset found for rootdir '%s'", rootdir)
return errors.Wrapf(graphdriver.ErrPrerequisites, "no zfs dataset found for rootdir '%s'", rootdir)
}

return nil
}

/* rootfs is introduced to comply with the OCI spec
which states that root filesystem must be mounted at <CID>/rootfs/ instead of <CID>/
*/
func getMountpoint(id string) string {
maxlen := 12

// we need to preserve filesystem suffix
suffix := strings.SplitN(id, "-", 2)

if len(suffix) > 1 {
return filepath.Join(id[:maxlen]+"-"+suffix[1], "rootfs", "root")
}

return filepath.Join(id[:maxlen], "rootfs", "root")
}

2 vendor/github.com/containers/storage/drivers/zfs/zfs_unsupported.go generated vendored
@@ -1,4 +1,4 @@
// +build !linux,!freebsd,!solaris
// +build !linux,!freebsd

package zfs

197 vendor/github.com/containers/storage/images.go generated vendored
@@ -5,8 +5,10 @@ import (
"io/ioutil"
"os"
"path/filepath"
"strings"
"time"

"github.com/containers/image/manifest"
"github.com/containers/storage/pkg/ioutils"
"github.com/containers/storage/pkg/stringid"
"github.com/containers/storage/pkg/truncindex"
@@ -15,9 +17,13 @@ import (
)

const (
// ImageDigestBigDataKey is the name of the big data item whose
// contents we consider useful for computing a "digest" of the
// image, by which we can locate the image later.
// ImageDigestManifestBigDataNamePrefix is a prefix of big data item
// names which we consider to be manifests, used for computing a
// "digest" value for the image as a whole, by which we can locate the
// image later.
ImageDigestManifestBigDataNamePrefix = "manifest"
// ImageDigestBigDataKey is provided for compatibility with older
// versions of the image library. It will be removed in the future.
ImageDigestBigDataKey = "manifest"
)

@@ -27,12 +33,19 @@ type Image struct {
// value which was generated by the library.
ID string `json:"id"`

// Digest is a digest value that we can use to locate the image.
// Digest is a digest value that we can use to locate the image, if one
// was specified at creation-time.
Digest digest.Digest `json:"digest,omitempty"`

// Digests is a list of digest values of the image's manifests, and
// possibly a manually-specified value, that we can use to locate the
// image. If Digest is set, its value is also in this list.
Digests []digest.Digest `json:"-"`

// Names is an optional set of user-defined convenience values. The
// image can be referred to by its ID or any of its names. Names are
// unique among images.
// unique among images, and are often the text representation of tagged
// or canonical references.
Names []string `json:"names,omitempty"`

// TopLayer is the ID of the topmost layer of the image itself, if the
@@ -42,7 +55,9 @@ type Image struct {

// MappedTopLayers are the IDs of alternate versions of the top layer
// which have the same contents and parent, and which differ from
// TopLayer only in which ID mappings they use.
// TopLayer only in which ID mappings they use. When the image is
// to be removed, they should be removed before the TopLayer, as the
// graph driver may depend on that.
MappedTopLayers []string `json:"mapped-layers,omitempty"`

// Metadata is data we keep for the convenience of the caller. It is not
@@ -90,8 +105,10 @@ type ROImageStore interface {
// Images returns a slice enumerating the known images.
Images() ([]Image, error)

// Images returns a slice enumerating the images which have a big data
// item with the name ImageDigestBigDataKey and the specified digest.
// ByDigest returns a slice enumerating the images which have either an
// explicitly-set digest, or a big data item with a name that starts
// with ImageDigestManifestBigDataNamePrefix, which matches the
// specified digest.
ByDigest(d digest.Digest) ([]*Image, error)
}

@@ -109,7 +126,8 @@ type ImageStore interface {
Create(id string, names []string, layer, metadata string, created time.Time, searchableDigest digest.Digest) (*Image, error)

// SetNames replaces the list of names associated with an image with the
// supplied values.
// supplied values. The values are expected to be valid normalized
// named image references.
SetNames(id string, names []string) error

// Delete removes the record of the image.
@@ -133,6 +151,7 @@ func copyImage(i *Image) *Image {
return &Image{
ID: i.ID,
Digest: i.Digest,
Digests: copyDigestSlice(i.Digests),
Names: copyStringSlice(i.Names),
TopLayer: i.TopLayer,
MappedTopLayers: copyStringSlice(i.MappedTopLayers),
@@ -145,6 +164,17 @@ func copyImage(i *Image) *Image {
}
}

func copyImageSlice(slice []*Image) []*Image {
if len(slice) > 0 {
cp := make([]*Image, len(slice))
for i := range slice {
cp[i] = copyImage(slice[i])
}
return cp
}
return nil
}

func (r *imageStore) Images() ([]Image, error) {
images := make([]Image, len(r.images))
for i := range r.images {
@@ -165,6 +195,46 @@ func (r *imageStore) datapath(id, key string) string {
return filepath.Join(r.datadir(id), makeBigDataBaseName(key))
}

// bigDataNameIsManifest determines if a big data item with the specified name
// is considered to be representative of the image, in that its digest can be
// said to also be the image's digest. Currently, if its name is, or begins
// with, "manifest", we say that it is.
func bigDataNameIsManifest(name string) bool {
return strings.HasPrefix(name, ImageDigestManifestBigDataNamePrefix)
}

// recomputeDigests takes a fixed digest and a name-to-digest map and builds a
// list of the unique values that would identify the image.
func (image *Image) recomputeDigests() error {
validDigests := make([]digest.Digest, 0, len(image.BigDataDigests)+1)
digests := make(map[digest.Digest]struct{})
if image.Digest != "" {
if err := image.Digest.Validate(); err != nil {
return errors.Wrapf(err, "error validating image digest %q", string(image.Digest))
}
digests[image.Digest] = struct{}{}
validDigests = append(validDigests, image.Digest)
}
for name, digest := range image.BigDataDigests {
if !bigDataNameIsManifest(name) {
continue
}
if digest.Validate() != nil {
return errors.Wrapf(digest.Validate(), "error validating digest %q for big data item %q", string(digest), name)
}
// Deduplicate the digest values.
if _, known := digests[digest]; !known {
digests[digest] = struct{}{}
validDigests = append(validDigests, digest)
}
}
if image.Digest == "" && len(validDigests) > 0 {
image.Digest = validDigests[0]
}
image.Digests = validDigests
return nil
}

func (r *imageStore) Load() error {
shouldSave := false
rpath := r.imagespath()
@@ -187,17 +257,18 @@ func (r *imageStore) Load() error {
r.removeName(conflict, name)
shouldSave = true
}
names[name] = images[n]
}
// Implicit digest
if digest, ok := image.BigDataDigests[ImageDigestBigDataKey]; ok {
digests[digest] = append(digests[digest], images[n])
// Compute the digest list.
err = image.recomputeDigests()
if err != nil {
return errors.Wrapf(err, "error computing digests for image with ID %q (%v)", image.ID, image.Names)
}
// Explicit digest
if image.Digest == "" {
image.Digest = image.BigDataDigests[ImageDigestBigDataKey]
} else if image.Digest != image.BigDataDigests[ImageDigestBigDataKey] {
digests[image.Digest] = append(digests[image.Digest], images[n])
for _, name := range image.Names {
names[name] = image
}
for _, digest := range image.Digests {
list := digests[digest]
digests[digest] = append(list, image)
}
}
}
@@ -331,12 +402,12 @@ func (r *imageStore) Create(id string, names []string, layer, metadata string, c
}
}
if _, idInUse := r.byid[id]; idInUse {
return nil, ErrDuplicateID
return nil, errors.Wrapf(ErrDuplicateID, "an image with ID %q already exists", id)
}
names = dedupeNames(names)
for _, name := range names {
if _, nameInUse := r.byname[name]; nameInUse {
return nil, ErrDuplicateName
if image, nameInUse := r.byname[name]; nameInUse {
return nil, errors.Wrapf(ErrDuplicateName, "image name %q is already associated with image %q", name, image.ID)
}
}
if created.IsZero() {
@@ -346,6 +417,7 @@ func (r *imageStore) Create(id string, names []string, layer, metadata string, c
image = &Image{
ID: id,
Digest: searchableDigest,
Digests: nil,
Names: names,
TopLayer: layer,
Metadata: metadata,
@@ -355,16 +427,20 @@ func (r *imageStore) Create(id string, names []string, layer, metadata string, c
Created: created,
Flags: make(map[string]interface{}),
}
err := image.recomputeDigests()
if err != nil {
return nil, errors.Wrapf(err, "error validating digests for new image")
}
r.images = append(r.images, image)
r.idindex.Add(id)
r.byid[id] = image
if searchableDigest != "" {
list := r.bydigest[searchableDigest]
r.bydigest[searchableDigest] = append(list, image)
}
for _, name := range names {
r.byname[name] = image
}
for _, digest := range image.Digests {
list := r.bydigest[digest]
r.bydigest[digest] = append(list, image)
}
err = r.Save()
image = copyImage(image)
}
@@ -442,6 +518,14 @@ func (r *imageStore) Delete(id string) error {
for _, name := range image.Names {
delete(r.byname, name)
}
for _, digest := range image.Digests {
prunedList := imageSliceWithoutValue(r.bydigest[digest], image)
if len(prunedList) == 0 {
delete(r.bydigest, digest)
} else {
r.bydigest[digest] = prunedList
}
}
if toDeleteIndex != -1 {
// delete the image at toDeleteIndex
if toDeleteIndex == len(r.images)-1 {
@@ -450,28 +534,6 @@ func (r *imageStore) Delete(id string) error {
r.images = append(r.images[:toDeleteIndex], r.images[toDeleteIndex+1:]...)
}
}
if digest, ok := image.BigDataDigests[ImageDigestBigDataKey]; ok {
// remove the image from the digest-based index
if list, ok := r.bydigest[digest]; ok {
prunedList := imageSliceWithoutValue(list, image)
if len(prunedList) == 0 {
delete(r.bydigest, digest)
} else {
r.bydigest[digest] = prunedList
}
}
}
if image.Digest != "" {
// remove the image's hard-coded digest from the digest-based index
if list, ok := r.bydigest[image.Digest]; ok {
prunedList := imageSliceWithoutValue(list, image)
if len(prunedList) == 0 {
delete(r.bydigest, image.Digest)
} else {
r.bydigest[image.Digest] = prunedList
}
}
}
if err := r.Save(); err != nil {
return err
}
@@ -502,7 +564,7 @@ func (r *imageStore) Exists(id string) bool {

func (r *imageStore) ByDigest(d digest.Digest) ([]*Image, error) {
if images, ok := r.bydigest[d]; ok {
return images, nil
return copyImageSlice(images), nil
}
return nil, ErrImageUnknown
}
@@ -604,10 +666,19 @@ func (r *imageStore) SetBigData(id, key string, data []byte) error {
if !ok {
return ErrImageUnknown
}
if err := os.MkdirAll(r.datadir(image.ID), 0700); err != nil {
err := os.MkdirAll(r.datadir(image.ID), 0700)
if err != nil {
return err
}
err := ioutils.AtomicWriteFile(r.datapath(image.ID, key), data, 0600)
var newDigest digest.Digest
if bigDataNameIsManifest(key) {
if newDigest, err = manifest.Digest(data); err != nil {
return errors.Wrapf(err, "error digesting manifest")
}
} else {
newDigest = digest.Canonical.FromBytes(data)
}
err = ioutils.AtomicWriteFile(r.datapath(image.ID, key), data, 0600)
if err == nil {
save := false
if image.BigDataSizes == nil {
@@ -619,7 +690,6 @@ func (r *imageStore) SetBigData(id, key string, data []byte) error {
image.BigDataDigests = make(map[string]digest.Digest)
}
oldDigest, digestOk := image.BigDataDigests[key]
newDigest := digest.Canonical.FromBytes(data)
image.BigDataDigests[key] = newDigest
if !sizeOk || oldSize != image.BigDataSizes[key] || !digestOk || oldDigest != newDigest {
save = true
@@ -635,20 +705,21 @@ func (r *imageStore) SetBigData(id, key string, data []byte) error {
image.BigDataNames = append(image.BigDataNames, key)
save = true
}
if key == ImageDigestBigDataKey {
if oldDigest != "" && oldDigest != newDigest && oldDigest != image.Digest {
// remove the image from the list of images in the digest-based
// index which corresponds to the old digest for this item, unless
// it's also the hard-coded digest
if list, ok := r.bydigest[oldDigest]; ok {
prunedList := imageSliceWithoutValue(list, image)
if len(prunedList) == 0 {
delete(r.bydigest, oldDigest)
} else {
r.bydigest[oldDigest] = prunedList
}
for _, oldDigest := range image.Digests {
// remove the image from the list of images in the digest-based index
if list, ok := r.bydigest[oldDigest]; ok {
prunedList := imageSliceWithoutValue(list, image)
if len(prunedList) == 0 {
delete(r.bydigest, oldDigest)
} else {
r.bydigest[oldDigest] = prunedList
}
}
}
if err = image.recomputeDigests(); err != nil {
return errors.Wrapf(err, "error loading recomputing image digest information for %s", image.ID)
}
for _, newDigest := range image.Digests {
// add the image to the list of images in the digest-based index which
// corresponds to the new digest for this item, unless it's already there
list := r.bydigest[newDigest]

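recomputeDigests collects the explicitly-set digest plus the digest of every "manifest*" big data item, deduplicating while preserving order. The sketch below isolates just that deduplication step; uniqueDigests is an illustrative helper and not part of the vendored API, while the digest.Digest type and the map[digest.Digest]struct{} bookkeeping mirror the code above.

package main

import (
	"fmt"

	digest "github.com/opencontainers/go-digest"
)

// uniqueDigests keeps the first occurrence of each digest, preserving order.
func uniqueDigests(in []digest.Digest) []digest.Digest {
	seen := make(map[digest.Digest]struct{}, len(in))
	out := make([]digest.Digest, 0, len(in))
	for _, d := range in {
		if _, known := seen[d]; known {
			continue
		}
		seen[d] = struct{}{}
		out = append(out, d)
	}
	return out
}

func main() {
	a := digest.FromString("manifest-a")
	b := digest.FromString("manifest-b")
	fmt.Println(uniqueDigests([]digest.Digest{a, b, a})) // prints a and b, each once
}
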
44 vendor/github.com/containers/storage/layers.go generated vendored
@@ -551,9 +551,20 @@ func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLab
}
}
parent := ""
var parentMappings *idtools.IDMappings
if parentLayer != nil {
parent = parentLayer.ID
}
var parentMappings, templateIDMappings, oldMappings *idtools.IDMappings
if moreOptions.TemplateLayer != "" {
templateLayer, ok := r.lookup(moreOptions.TemplateLayer)
if !ok {
return nil, -1, ErrLayerUnknown
}
templateIDMappings = idtools.NewIDMappingsFromMaps(templateLayer.UIDMap, templateLayer.GIDMap)
} else {
templateIDMappings = &idtools.IDMappings{}
}
if parentLayer != nil {
parentMappings = idtools.NewIDMappingsFromMaps(parentLayer.UIDMap, parentLayer.GIDMap)
} else {
parentMappings = &idtools.IDMappings{}
@@ -566,23 +577,34 @@ func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLab
MountLabel: mountLabel,
StorageOpt: options,
}
if writeable {
if err = r.driver.CreateReadWrite(id, parent, &opts); err != nil {
if moreOptions.TemplateLayer != "" {
if err = r.driver.CreateFromTemplate(id, moreOptions.TemplateLayer, templateIDMappings, parent, parentMappings, &opts, writeable); err != nil {
if id != "" {
return nil, -1, errors.Wrapf(err, "error creating read-write layer with ID %q", id)
return nil, -1, errors.Wrapf(err, "error creating copy of template layer %q with ID %q", moreOptions.TemplateLayer, id)
}
return nil, -1, errors.Wrapf(err, "error creating read-write layer")
return nil, -1, errors.Wrapf(err, "error creating copy of template layer %q", moreOptions.TemplateLayer)
}
oldMappings = templateIDMappings
} else {
if err = r.driver.Create(id, parent, &opts); err != nil {
if id != "" {
return nil, -1, errors.Wrapf(err, "error creating layer with ID %q", id)
if writeable {
if err = r.driver.CreateReadWrite(id, parent, &opts); err != nil {
if id != "" {
return nil, -1, errors.Wrapf(err, "error creating read-write layer with ID %q", id)
}
return nil, -1, errors.Wrapf(err, "error creating read-write layer")
}
} else {
if err = r.driver.Create(id, parent, &opts); err != nil {
if id != "" {
return nil, -1, errors.Wrapf(err, "error creating layer with ID %q", id)
}
return nil, -1, errors.Wrapf(err, "error creating layer")
}
return nil, -1, errors.Wrapf(err, "error creating layer")
}
oldMappings = parentMappings
}
if !reflect.DeepEqual(parentMappings.UIDs(), idMappings.UIDs()) || !reflect.DeepEqual(parentMappings.GIDs(), idMappings.GIDs()) {
if err = r.driver.UpdateLayerIDMap(id, parentMappings, idMappings, mountLabel); err != nil {
if !reflect.DeepEqual(oldMappings.UIDs(), idMappings.UIDs()) || !reflect.DeepEqual(oldMappings.GIDs(), idMappings.GIDs()) {
if err = r.driver.UpdateLayerIDMap(id, oldMappings, idMappings, mountLabel); err != nil {
// We don't have a record of this layer, but at least
// try to clean it up underneath us.
r.driver.Remove(id)

2 vendor/github.com/containers/storage/layers_ffjson.go generated vendored
@@ -1,5 +1,5 @@
// Code generated by ffjson <https://github.com/pquerna/ffjson>. DO NOT EDIT.
// source: layers.go
// source: ./layers.go

package storage

107 vendor/github.com/containers/storage/pkg/archive/archive.go generated vendored
@@ -13,6 +13,7 @@ import (
"path/filepath"
"runtime"
"strings"
"sync"
"syscall"

"github.com/containers/storage/pkg/fileutils"
@@ -23,6 +24,7 @@ import (
"github.com/containers/storage/pkg/system"
gzip "github.com/klauspost/pgzip"
rsystem "github.com/opencontainers/runc/libcontainer/system"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)

@@ -1331,3 +1333,108 @@ const (
// HeaderSize is the size in bytes of a tar header
HeaderSize = 512
)

// NewArchiver returns a new Archiver
func NewArchiver(idMappings *idtools.IDMappings) *Archiver {
if idMappings == nil {
idMappings = &idtools.IDMappings{}
}
return &Archiver{Untar: Untar, TarIDMappings: idMappings, UntarIDMappings: idMappings}
}

// NewArchiverWithChown returns a new Archiver which uses Untar and the provided ID mapping configuration on both ends
func NewArchiverWithChown(tarIDMappings *idtools.IDMappings, chownOpts *idtools.IDPair, untarIDMappings *idtools.IDMappings) *Archiver {
if tarIDMappings == nil {
tarIDMappings = &idtools.IDMappings{}
}
if untarIDMappings == nil {
untarIDMappings = &idtools.IDMappings{}
}
return &Archiver{Untar: Untar, TarIDMappings: tarIDMappings, ChownOpts: chownOpts, UntarIDMappings: untarIDMappings}
}

// CopyFileWithTarAndChown returns a function which copies a single file from outside
// of any container into our working container, mapping permissions using the
// container's ID maps, possibly overridden using the passed-in chownOpts
func CopyFileWithTarAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap []idtools.IDMap, gidmap []idtools.IDMap) func(src, dest string) error {
untarMappings := idtools.NewIDMappingsFromMaps(uidmap, gidmap)
archiver := NewArchiverWithChown(nil, chownOpts, untarMappings)
if hasher != nil {
originalUntar := archiver.Untar
archiver.Untar = func(tarArchive io.Reader, dest string, options *TarOptions) error {
contentReader, contentWriter, err := os.Pipe()
if err != nil {
return errors.Wrapf(err, "error creating pipe extract data to %q", dest)
}
defer contentReader.Close()
defer contentWriter.Close()
var hashError error
var hashWorker sync.WaitGroup
hashWorker.Add(1)
go func() {
t := tar.NewReader(contentReader)
_, err := t.Next()
if err != nil {
hashError = err
}
if _, err = io.Copy(hasher, t); err != nil && err != io.EOF {
hashError = err
}
hashWorker.Done()
}()
if err = originalUntar(io.TeeReader(tarArchive, contentWriter), dest, options); err != nil {
err = errors.Wrapf(err, "error extracting data to %q while copying", dest)
}
hashWorker.Wait()
if err == nil {
err = errors.Wrapf(hashError, "error calculating digest of data for %q while copying", dest)
}
return err
}
}
return archiver.CopyFileWithTar
}

// CopyWithTarAndChown returns a function which copies a directory tree from outside of
// any container into our working container, mapping permissions using the
// container's ID maps, possibly overridden using the passed-in chownOpts
func CopyWithTarAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap []idtools.IDMap, gidmap []idtools.IDMap) func(src, dest string) error {
untarMappings := idtools.NewIDMappingsFromMaps(uidmap, gidmap)
archiver := NewArchiverWithChown(nil, chownOpts, untarMappings)
if hasher != nil {
originalUntar := archiver.Untar
archiver.Untar = func(tarArchive io.Reader, dest string, options *TarOptions) error {
return originalUntar(io.TeeReader(tarArchive, hasher), dest, options)
}
}
return archiver.CopyWithTar
}

// UntarPathAndChown returns a function which extracts an archive in a specified
// location into our working container, mapping permissions using the
// container's ID maps, possibly overridden using the passed-in chownOpts
func UntarPathAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap []idtools.IDMap, gidmap []idtools.IDMap) func(src, dest string) error {
untarMappings := idtools.NewIDMappingsFromMaps(uidmap, gidmap)
archiver := NewArchiverWithChown(nil, chownOpts, untarMappings)
if hasher != nil {
originalUntar := archiver.Untar
archiver.Untar = func(tarArchive io.Reader, dest string, options *TarOptions) error {
return originalUntar(io.TeeReader(tarArchive, hasher), dest, options)
}
}
return archiver.UntarPath
}

// TarPath returns a function which creates an archive of a specified
// location in the container's filesystem, mapping permissions using the
// container's ID maps
func TarPath(uidmap []idtools.IDMap, gidmap []idtools.IDMap) func(path string) (io.ReadCloser, error) {
tarMappings := idtools.NewIDMappingsFromMaps(uidmap, gidmap)
return func(path string) (io.ReadCloser, error) {
return TarWithOptions(path, &TarOptions{
Compression: Uncompressed,
UIDMaps: tarMappings.UIDs(),
GIDMaps: tarMappings.GIDs(),
})
}
}

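Several of the new helpers wrap the archiver's Untar with io.TeeReader so the archive bytes are digested as they are extracted. The sketch below shows just that tee-while-consuming idea with a plain sha256 hasher; hashWhileCopying and the example payload are illustrative, only the io.TeeReader pattern is taken from the code above.

package main

import (
	"crypto/sha256"
	"fmt"
	"io"
	"strings"
)

// hashWhileCopying copies src to dst while feeding the same bytes to hasher,
// the io.TeeReader pattern the new archive helpers use around Untar.
func hashWhileCopying(dst io.Writer, src io.Reader, hasher io.Writer) error {
	_, err := io.Copy(dst, io.TeeReader(src, hasher))
	return err
}

func main() {
	h := sha256.New()
	if err := hashWhileCopying(io.Discard, strings.NewReader("example payload"), h); err != nil {
		panic(err)
	}
	fmt.Printf("sha256: %x\n", h.Sum(nil))
}
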
92
vendor/github.com/containers/storage/pkg/chrootarchive/archive.go
generated
vendored
92
vendor/github.com/containers/storage/pkg/chrootarchive/archive.go
generated
vendored
@ -1,34 +1,32 @@
|
||||
package chrootarchive
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
|
||||
"github.com/containers/storage/pkg/archive"
|
||||
"github.com/containers/storage/pkg/idtools"
|
||||
rsystem "github.com/opencontainers/runc/libcontainer/system"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// NewArchiver returns a new Archiver which uses chrootarchive.Untar
|
||||
func NewArchiver(idMappings *idtools.IDMappings) *archive.Archiver {
|
||||
if idMappings == nil {
|
||||
idMappings = &idtools.IDMappings{}
|
||||
}
|
||||
return &archive.Archiver{Untar: Untar, TarIDMappings: idMappings, UntarIDMappings: idMappings}
|
||||
archiver := archive.NewArchiver(idMappings)
|
||||
archiver.Untar = Untar
|
||||
return archiver
|
||||
}
|
||||
|
||||
// NewArchiverWithChown returns a new Archiver which uses chrootarchive.Untar and the provided ID mapping configuration on both ends
|
||||
func NewArchiverWithChown(tarIDMappings *idtools.IDMappings, chownOpts *idtools.IDPair, untarIDMappings *idtools.IDMappings) *archive.Archiver {
|
||||
if tarIDMappings == nil {
|
||||
tarIDMappings = &idtools.IDMappings{}
|
||||
}
|
||||
if untarIDMappings == nil {
|
||||
untarIDMappings = &idtools.IDMappings{}
|
||||
}
|
||||
return &archive.Archiver{Untar: Untar, TarIDMappings: tarIDMappings, ChownOpts: chownOpts, UntarIDMappings: untarIDMappings}
|
||||
archiver := archive.NewArchiverWithChown(tarIDMappings, chownOpts, untarIDMappings)
|
||||
archiver.Untar = Untar
|
||||
return archiver
|
||||
}
|
||||
|
||||
// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
|
||||
@ -81,3 +79,75 @@ func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions

return invokeUnpack(r, dest, options)
}

// CopyFileWithTarAndChown returns a function which copies a single file from outside
// of any container into our working container, mapping permissions using the
// container's ID maps, possibly overridden using the passed-in chownOpts
func CopyFileWithTarAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap []idtools.IDMap, gidmap []idtools.IDMap) func(src, dest string) error {
untarMappings := idtools.NewIDMappingsFromMaps(uidmap, gidmap)
archiver := NewArchiverWithChown(nil, chownOpts, untarMappings)
if hasher != nil {
originalUntar := archiver.Untar
archiver.Untar = func(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
contentReader, contentWriter, err := os.Pipe()
if err != nil {
return errors.Wrapf(err, "error creating pipe extract data to %q", dest)
}
defer contentReader.Close()
defer contentWriter.Close()
var hashError error
var hashWorker sync.WaitGroup
hashWorker.Add(1)
go func() {
t := tar.NewReader(contentReader)
_, err := t.Next()
if err != nil {
hashError = err
}
if _, err = io.Copy(hasher, t); err != nil && err != io.EOF {
hashError = err
}
hashWorker.Done()
}()
if err = originalUntar(io.TeeReader(tarArchive, contentWriter), dest, options); err != nil {
err = errors.Wrapf(err, "error extracting data to %q while copying", dest)
}
hashWorker.Wait()
if err == nil {
err = errors.Wrapf(hashError, "error calculating digest of data for %q while copying", dest)
}
return err
}
}
return archiver.CopyFileWithTar
}
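A self-contained sketch, not from the diff, of the tee-and-hash pattern CopyFileWithTarAndChown uses above: the tar stream is split with io.TeeReader, one copy goes to the extractor (here just drained), while a goroutine reads the other copy through a tar.Reader and digests only the entry's content, not the tar header. The in-memory archive and the switch from os.Pipe to io.Pipe with an explicit drain are adaptations for a standalone example.

package main

import (
	"archive/tar"
	"bytes"
	"crypto/sha256"
	"fmt"
	"io"
	"io/ioutil"
	"sync"
)

func main() {
	// Build a one-file tar archive in memory; it stands in for the incoming stream.
	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)
	body := []byte("hello world")
	if err := tw.WriteHeader(&tar.Header{Name: "file.txt", Mode: 0600, Size: int64(len(body))}); err != nil {
		panic(err)
	}
	if _, err := tw.Write(body); err != nil {
		panic(err)
	}
	tw.Close()

	pr, pw := io.Pipe()
	hasher := sha256.New()

	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		defer io.Copy(ioutil.Discard, pr) // always drain so the writer never blocks
		tr := tar.NewReader(pr)
		if _, err := tr.Next(); err != nil { // position past the tar header
			return
		}
		io.Copy(hasher, tr) // digest only the entry's content
	}()

	// The "extractor" side: consume the stream while teeing a copy into the pipe.
	io.Copy(ioutil.Discard, io.TeeReader(&buf, pw))
	pw.Close()
	wg.Wait()

	fmt.Printf("sha256(file content): %x\n", hasher.Sum(nil))
}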

// CopyWithTarAndChown returns a function which copies a directory tree from outside of
// any container into our working container, mapping permissions using the
// container's ID maps, possibly overridden using the passed-in chownOpts
func CopyWithTarAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap []idtools.IDMap, gidmap []idtools.IDMap) func(src, dest string) error {
untarMappings := idtools.NewIDMappingsFromMaps(uidmap, gidmap)
archiver := NewArchiverWithChown(nil, chownOpts, untarMappings)
if hasher != nil {
originalUntar := archiver.Untar
archiver.Untar = func(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
return originalUntar(io.TeeReader(tarArchive, hasher), dest, options)
}
}
return archiver.CopyWithTar
}

// UntarPathAndChown returns a function which extracts an archive in a specified
// location into our working container, mapping permissions using the
// container's ID maps, possibly overridden using the passed-in chownOpts
func UntarPathAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap []idtools.IDMap, gidmap []idtools.IDMap) func(src, dest string) error {
untarMappings := idtools.NewIDMappingsFromMaps(uidmap, gidmap)
archiver := NewArchiverWithChown(nil, chownOpts, untarMappings)
if hasher != nil {
originalUntar := archiver.Untar
archiver.Untar = func(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
return originalUntar(io.TeeReader(tarArchive, hasher), dest, options)
}
}
return archiver.UntarPath
}
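Again not part of the diff: a sketch of UntarPathAndChown from the caller's side, extracting a tarball into a directory while a sha256 digest of the whole tar stream is collected through the hasher parameter. Paths are hypothetical, and the same reexec and privilege caveats as the earlier chrootarchive sketch apply.

package main

import (
	"crypto/sha256"
	"fmt"

	"github.com/containers/storage/pkg/chrootarchive"
	"github.com/containers/storage/pkg/reexec"
)

func main() {
	if reexec.Init() {
		return
	}

	hasher := sha256.New()
	// nil chownOpts and nil ID maps: no ownership override, no remapping.
	untar := chrootarchive.UntarPathAndChown(nil, hasher, nil, nil)
	if err := untar("/tmp/layer.tar", "/tmp/rootfs"); err != nil { // hypothetical paths
		panic(err)
	}
	fmt.Printf("sha256 of the tar stream: %x\n", hasher.Sum(nil))
}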
96
vendor/github.com/containers/storage/pkg/config/config.go
generated
vendored
Normal file
96
vendor/github.com/containers/storage/pkg/config/config.go
generated
vendored
Normal file
@ -0,0 +1,96 @@
package config

// ThinpoolOptionsConfig represents the "storage.options.thinpool"
// TOML config table.
type ThinpoolOptionsConfig struct {
// AutoExtendPercent determines the amount by which pool needs to be
// grown. This is specified in terms of % of pool size. So a value of
// 20 means that when threshold is hit, pool will be grown by 20% of
// existing pool size.
AutoExtendPercent string `toml:"autoextend_percent"`

// AutoExtendThreshold determines the pool extension threshold in terms
// of percentage of pool size. For example, if threshold is 60, that
// means when pool is 60% full, threshold has been hit.
AutoExtendThreshold string `toml:"autoextend_threshold"`

// BaseSize specifies the size to use when creating the base device,
// which limits the size of images and containers.
BaseSize string `toml:"basesize"`

// BlockSize specifies a custom blocksize to use for the thin pool.
BlockSize string `toml:"blocksize"`

// DirectLvmDevice specifies a custom block storage device to use for
// the thin pool.
DirectLvmDevice string `toml:"directlvm_device"`

// DirectLvmDeviceForce wipes the device even if it already has a
// filesystem.
DirectLvmDeviceForce string `toml:"directlvm_device_force"`

// Fs specifies the filesystem type to use for the base device.
Fs string `toml:"fs"`

// log_level sets the log level of devicemapper.
LogLevel string `toml:"log_level"`

// MinFreeSpace specifies the minimum free space percent in a thin pool
// required for new device creation to succeed.
MinFreeSpace string `toml:"min_free_space"`

// MkfsArg specifies extra mkfs arguments to be used when creating the
// base device.
MkfsArg string `toml:"mkfsarg"`

// MountOpt specifies extra mount options used when mounting the thin
// devices.
MountOpt string `toml:"mountopt"`

// UseDeferredDeletion marks device for deferred deletion
UseDeferredDeletion string `toml:"use_deferred_deletion"`

// UseDeferredRemoval marks device for deferred removal
UseDeferredRemoval string `toml:"use_deferred_removal"`

// XfsNoSpaceMaxRetries specifies the maximum number of
// retries XFS should attempt to complete IO when ENOSPC (no space)
// error is returned by underlying storage device.
XfsNoSpaceMaxRetries string `toml:"xfs_nospace_max_retries"`
}

// OptionsConfig represents the "storage.options" TOML config table.
type OptionsConfig struct {
// AdditionalImageStores is the location of additional read-only
// image stores, usually used to access shared image content over a
// networked file system.
AdditionalImageStores []string `toml:"additionalimagestores"`

// Size
Size string `toml:"size"`

// RemapUIDs is a list of default UID mappings to use for layers.
RemapUIDs string `toml:"remap-uids"`
// RemapGIDs is a list of default GID mappings to use for layers.
RemapGIDs string `toml:"remap-gids"`

// RemapUser is the name of one or more entries in /etc/subuid which
// should be used to set up default UID mappings.
RemapUser string `toml:"remap-user"`
// RemapGroup is the name of one or more entries in /etc/subgid which
// should be used to set up default GID mappings.
RemapGroup string `toml:"remap-group"`
// Thinpool contains options to be handed to thinpool drivers
Thinpool struct{ ThinpoolOptionsConfig } `toml:"thinpool"`
// OSTree repository
OstreeRepo string `toml:"ostree_repo"`

// Do not create a bind mount on the storage home
SkipMountHome string `toml:"skip_mount_home"`

// Alternative program to use for the mount of the file system
MountProgram string `toml:"mount_program"`

// MountOpt specifies extra mount options used when mounting
MountOpt string `toml:"mountopt"`
}
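For orientation, and not taken from the diff: these tables correspond to sections of a storage.conf-style file, and github.com/BurntSushi/toml, which is pinned in the vendor.conf below, can decode them. The wrapper struct, the driver key, and the sample values here are illustrative assumptions; the library's own loader is not shown in this pull request (the store.go diff above is suppressed).

package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
	"github.com/containers/storage/pkg/config"
)

// tomlConfig is a hypothetical wrapper mirroring a [storage] table layout.
type tomlConfig struct {
	Storage struct {
		Driver  string               `toml:"driver"`
		Options config.OptionsConfig `toml:"options"`
	} `toml:"storage"`
}

const sample = `
[storage]
driver = "overlay"

[storage.options]
additionalimagestores = ["/var/lib/shared-images"]
mount_program = "/usr/bin/fuse-overlayfs"

[storage.options.thinpool]
autoextend_percent = "20"
autoextend_threshold = "80"
`

func main() {
	var cfg tomlConfig
	if _, err := toml.Decode(sample, &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.Storage.Driver, cfg.Storage.Options.AdditionalImageStores)
	// The thinpool table lands in the embedded ThinpoolOptionsConfig.
	fmt.Println(cfg.Storage.Options.Thinpool.AutoExtendPercent)
}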
506
vendor/github.com/containers/storage/store.go
generated
vendored
506
vendor/github.com/containers/storage/store.go
generated
vendored
File diff suppressed because it is too large
11
vendor/github.com/containers/storage/vendor.conf
generated
vendored
11
vendor/github.com/containers/storage/vendor.conf
generated
vendored
@ -1,14 +1,20 @@
github.com/BurntSushi/toml master
github.com/Microsoft/go-winio 307e919c663683a9000576fdc855acaf9534c165
github.com/Microsoft/hcsshim a8d9cc56cbce765a7eebdf4792e6ceceeff3edb8
github.com/containers/image master
github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76
github.com/docker/docker 86f080cff0914e9694068ed78d503701667c4c00
github.com/docker/go-units 0dadbb0345b35ec7ef35e228dabb8de89a65bf52
github.com/docker/libtrust master
github.com/klauspost/compress v1.4.1
github.com/klauspost/cpuid v1.2.0
github.com/klauspost/pgzip v1.2.1
github.com/mattn/go-shellwords 753a2322a99f87c0eff284980e77f53041555bc6
github.com/mistifyio/go-zfs c0224de804d438efd11ea6e52ada8014537d6062
github.com/opencontainers/go-digest master
github.com/opencontainers/image-spec master
github.com/opencontainers/runc 6c22e77604689db8725fa866f0f2ec0b3e8c3a07
github.com/opencontainers/selinux 36a9bc45a08c85f2c52bd9eb32e20267876773bd
github.com/opencontainers/selinux v1.1
github.com/ostreedev/ostree-go master
github.com/pborman/uuid 1b00554d822231195d1babd97ff4a781231955c9
github.com/pkg/errors master
@ -23,6 +29,3 @@ golang.org/x/net 7dcfb8076726a3fdd9353b6b8a1f1b6be6811bd6
golang.org/x/sys 07c182904dbd53199946ba614a412c61d3c548f5
gotest.tools master
github.com/google/go-cmp master
github.com/klauspost/pgzip v1.2.1
github.com/klauspost/compress v1.4.1
github.com/klauspost/cpuid v1.2.0