Update image-tools, and remove the duplicate Sirupsen/logrus vendor

Miloslav Trmač
2017-09-27 00:43:23 +02:00
parent 40a5f48632
commit 700199c944
290 changed files with 39444 additions and 13093 deletions

View File

@@ -10,6 +10,8 @@ import (
"github.com/containers/storage/pkg/ioutils"
"github.com/containers/storage/pkg/stringid"
"github.com/containers/storage/pkg/truncindex"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
)
// A Container is a reference to a read-write layer with metadata.
@@ -44,6 +46,10 @@ type Container struct {
// that has been stored, if they're known.
BigDataSizes map[string]int64 `json:"big-data-sizes,omitempty"`
// BigDataDigests maps the names in BigDataNames to the digests of the
// data that has been stored, if they're known.
BigDataDigests map[string]digest.Digest `json:"big-data-digests,omitempty"`
// Created is the datestamp for when this container was created. Older
// versions of the library did not track this information, so callers
// will likely want to use the IsZero() method to verify that a value
@@ -133,6 +139,7 @@ func (r *containerStore) Load() error {
ids := make(map[string]*Container)
names := make(map[string]*Container)
if err = json.Unmarshal(data, &containers); len(data) == 0 || err == nil {
idlist = make([]string, 0, len(containers))
for n, container := range containers {
idlist = append(idlist, container.ID)
ids[container.ID] = containers[n]
@@ -223,6 +230,9 @@ func (r *containerStore) SetFlag(id string, flag string, value interface{}) erro
if !ok {
return ErrContainerUnknown
}
if container.Flags == nil {
container.Flags = make(map[string]interface{})
}
container.Flags[flag] = value
return r.Save()
}
@@ -247,15 +257,16 @@ func (r *containerStore) Create(id string, names []string, image, layer, metadat
}
if err == nil {
container = &Container{
ID: id,
Names: names,
ImageID: image,
LayerID: layer,
Metadata: metadata,
BigDataNames: []string{},
BigDataSizes: make(map[string]int64),
Created: time.Now().UTC(),
Flags: make(map[string]interface{}),
ID: id,
Names: names,
ImageID: image,
LayerID: layer,
Metadata: metadata,
BigDataNames: []string{},
BigDataSizes: make(map[string]int64),
BigDataDigests: make(map[string]digest.Digest),
Created: time.Now().UTC(),
Flags: make(map[string]interface{}),
}
r.containers = append(r.containers, container)
r.byid[id] = container
@@ -362,6 +373,9 @@ func (r *containerStore) Exists(id string) bool {
}
func (r *containerStore) BigData(id, key string) ([]byte, error) {
if key == "" {
return nil, errors.Wrapf(ErrInvalidBigDataName, "can't retrieve container big data value for empty name")
}
c, ok := r.lookup(id)
if !ok {
return nil, ErrContainerUnknown
@@ -370,16 +384,61 @@ func (r *containerStore) BigData(id, key string) ([]byte, error) {
}
func (r *containerStore) BigDataSize(id, key string) (int64, error) {
if key == "" {
return -1, errors.Wrapf(ErrInvalidBigDataName, "can't retrieve size of container big data with empty name")
}
c, ok := r.lookup(id)
if !ok {
return -1, ErrContainerUnknown
}
if c.BigDataSizes == nil {
c.BigDataSizes = make(map[string]int64)
}
if size, ok := c.BigDataSizes[key]; ok {
return size, nil
}
if data, err := r.BigData(id, key); err == nil && data != nil {
if r.SetBigData(id, key, data) == nil {
c, ok := r.lookup(id)
if !ok {
return -1, ErrContainerUnknown
}
if size, ok := c.BigDataSizes[key]; ok {
return size, nil
}
}
}
return -1, ErrSizeUnknown
}
func (r *containerStore) BigDataDigest(id, key string) (digest.Digest, error) {
if key == "" {
return "", errors.Wrapf(ErrInvalidBigDataName, "can't retrieve digest of container big data value with empty name")
}
c, ok := r.lookup(id)
if !ok {
return "", ErrContainerUnknown
}
if c.BigDataDigests == nil {
c.BigDataDigests = make(map[string]digest.Digest)
}
if d, ok := c.BigDataDigests[key]; ok {
return d, nil
}
if data, err := r.BigData(id, key); err == nil && data != nil {
if r.SetBigData(id, key, data) == nil {
c, ok := r.lookup(id)
if !ok {
return "", ErrContainerUnknown
}
if d, ok := c.BigDataDigests[key]; ok {
return d, nil
}
}
}
return "", ErrDigestUnknown
}
func (r *containerStore) BigDataNames(id string) ([]string, error) {
c, ok := r.lookup(id)
if !ok {
@@ -389,6 +448,9 @@ func (r *containerStore) BigDataNames(id string) ([]string, error) {
}
func (r *containerStore) SetBigData(id, key string, data []byte) error {
if key == "" {
return errors.Wrapf(ErrInvalidBigDataName, "can't set empty name for container big data item")
}
c, ok := r.lookup(id)
if !ok {
return ErrContainerUnknown
@@ -399,19 +461,28 @@ func (r *containerStore) SetBigData(id, key string, data []byte) error {
err := ioutils.AtomicWriteFile(r.datapath(c.ID, key), data, 0600)
if err == nil {
save := false
oldSize, ok := c.BigDataSizes[key]
if c.BigDataSizes == nil {
c.BigDataSizes = make(map[string]int64)
}
oldSize, sizeOk := c.BigDataSizes[key]
c.BigDataSizes[key] = int64(len(data))
if !ok || oldSize != c.BigDataSizes[key] {
if c.BigDataDigests == nil {
c.BigDataDigests = make(map[string]digest.Digest)
}
oldDigest, digestOk := c.BigDataDigests[key]
newDigest := digest.Canonical.FromBytes(data)
c.BigDataDigests[key] = newDigest
if !sizeOk || oldSize != c.BigDataSizes[key] || !digestOk || oldDigest != newDigest {
save = true
}
add := true
addName := true
for _, name := range c.BigDataNames {
if name == key {
add = false
addName = false
break
}
}
if add {
if addName {
c.BigDataNames = append(c.BigDataNames, key)
save = true
}
@@ -423,7 +494,7 @@ func (r *containerStore) SetBigData(id, key string, data []byte) error {
}
func (r *containerStore) Wipe() error {
ids := []string{}
ids := make([]string, 0, len(r.byid))
for id := range r.byid {
ids = append(ids, id)
}
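
The BigData bookkeeping added here leans on github.com/opencontainers/go-digest. As a point of reference (not part of this commit), a minimal sketch of computing and validating a canonical digest; the payload is illustrative:

package main

import (
	"fmt"

	digest "github.com/opencontainers/go-digest"
)

func main() {
	data := []byte("example big-data item") // hypothetical payload
	// digest.Canonical is sha256; FromBytes returns a value like "sha256:<hex>".
	d := digest.Canonical.FromBytes(data)
	fmt.Println(d)

	// Validate() checks that the digest string is well-formed, which is what
	// the store does before trusting a cached value.
	if err := d.Validate(); err != nil {
		fmt.Println("invalid digest:", err)
	}
}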

vendor/github.com/containers/storage/containers_ffjson.go generated vendored Normal file (+1194)

File diff suppressed because it is too large

View File

@@ -8,6 +8,7 @@ import (
"os"
"path"
"path/filepath"
"syscall"
"github.com/containers/storage/pkg/system"
"github.com/pkg/errors"
@@ -15,10 +16,11 @@ import (
"golang.org/x/sys/unix"
)
// hasOpaqueCopyUpBug checks whether the filesystem has a bug
// doesSupportNativeDiff checks whether the filesystem has a bug
// which copies up the opaque flag when copying up an opaque
// directory. When this bug exists naive diff should be used.
func hasOpaqueCopyUpBug(d string) error {
// directory or the kernel enable CONFIG_OVERLAY_FS_REDIRECT_DIR.
// When these exist naive diff should be used.
func doesSupportNativeDiff(d string) error {
td, err := ioutil.TempDir(d, "opaque-bug-check")
if err != nil {
return err
@@ -29,10 +31,13 @@ func hasOpaqueCopyUpBug(d string) error {
}
}()
// Make directories l1/d, l2/d, l3, work, merged
// Make directories l1/d, l1/d1, l2/d, l3, work, merged
if err := os.MkdirAll(filepath.Join(td, "l1", "d"), 0755); err != nil {
return err
}
if err := os.MkdirAll(filepath.Join(td, "l1", "d1"), 0755); err != nil {
return err
}
if err := os.MkdirAll(filepath.Join(td, "l2", "d"), 0755); err != nil {
return err
}
@@ -75,5 +80,23 @@ func hasOpaqueCopyUpBug(d string) error {
return errors.New("opaque flag erroneously copied up, consider update to kernel 4.8 or later to fix")
}
// rename "d1" to "d2"
if err := os.Rename(filepath.Join(td, "merged", "d1"), filepath.Join(td, "merged", "d2")); err != nil {
// if rename failed with syscall.EXDEV, the kernel doesn't have CONFIG_OVERLAY_FS_REDIRECT_DIR enabled
if err.(*os.LinkError).Err == syscall.EXDEV {
return nil
}
return errors.Wrap(err, "failed to rename dir in merged directory")
}
// get the xattr of "d2"
xattrRedirect, err := system.Lgetxattr(filepath.Join(td, "l3", "d2"), "trusted.overlay.redirect")
if err != nil {
return errors.Wrap(err, "failed to read redirect flag on upper layer")
}
if string(xattrRedirect) == "d1" {
return errors.New("kernel has CONFIG_OVERLAY_FS_REDIRECT_DIR enabled")
}
return nil
}
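
The probe above decides that redirect_dir is unavailable when the rename fails with EXDEV. A minimal standalone sketch of the same unwrap-and-compare pattern (paths are hypothetical; the comma-ok assertion is used here instead of the diff's direct assertion):

package main

import (
	"fmt"
	"os"
	"syscall"
)

func main() {
	// os.Rename wraps failures in *os.LinkError; comparing its Err field
	// against syscall.EXDEV is how the check decides the kernel refused a
	// cross-layer rename (i.e. redirect_dir is not enabled).
	err := os.Rename("/tmp/does-not-exist/d1", "/tmp/does-not-exist/d2")
	if linkErr, ok := err.(*os.LinkError); ok {
		if linkErr.Err == syscall.EXDEV {
			fmt.Println("rename crossed filesystems: EXDEV")
			return
		}
		fmt.Println("rename failed:", linkErr.Err)
	}
}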

View File

@@ -49,7 +49,6 @@ func mountFrom(dir, device, target, mType string, flags uintptr, label string) e
output := bytes.NewBuffer(nil)
cmd.Stdout = output
cmd.Stderr = output
if err := cmd.Start(); err != nil {
w.Close()
return fmt.Errorf("mountfrom error on re-exec cmd: %v", err)

View File

@@ -228,7 +228,7 @@ func parseOptions(options []string) (*overlayOptions, error) {
key = strings.ToLower(key)
switch key {
case "overlay.override_kernel_check", "overlay2.override_kernel_check":
logrus.Debugf("overlay: overide_kernelcheck=%s", val)
logrus.Debugf("overlay: override_kernelcheck=%s", val)
o.overrideKernelCheck, err = strconv.ParseBool(val)
if err != nil {
return nil, err
@@ -287,8 +287,8 @@ func supportsOverlay() error {
func useNaiveDiff(home string) bool {
useNaiveDiffLock.Do(func() {
if err := hasOpaqueCopyUpBug(home); err != nil {
logrus.Warnf("Not using native diff for overlay: %v", err)
if err := doesSupportNativeDiff(home); err != nil {
logrus.Warnf("Not using native diff for overlay, this may cause degraded performance for building images: %v", err)
useNaiveDiffOnly = true
}
})
@@ -650,12 +650,22 @@ func (d *Driver) Get(id, mountLabel string) (_ string, retErr error) {
func (d *Driver) Put(id string) error {
d.locker.Lock(id)
defer d.locker.Unlock(id)
dir := d.dir(id)
if _, err := os.Stat(dir); err != nil {
return err
}
mountpoint := path.Join(d.dir(id), "merged")
if count := d.ctr.Decrement(mountpoint); count > 0 {
return nil
}
err := unix.Unmount(mountpoint, unix.MNT_DETACH)
if err != nil {
if _, err := ioutil.ReadFile(path.Join(dir, lowerFile)); err != nil {
// If no lower, we used the diff directory, so no work to do
if os.IsNotExist(err) {
return nil
}
return err
}
if err := unix.Unmount(mountpoint, unix.MNT_DETACH); err != nil {
logrus.Debugf("Failed to unmount %s overlay: %s - %v", id, mountpoint, err)
}
return nil
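
For reference, the corrected override_kernel_check option is still a plain key=value boolean. A small sketch, not taken from the driver itself, of how such an option string could be split and parsed; the option value shown is an assumption:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	// Example driver option in key=value form (the value is illustrative).
	option := "overlay.override_kernel_check=true"

	parts := strings.SplitN(option, "=", 2)
	key := strings.ToLower(parts[0])
	val := parts[1]

	switch key {
	case "overlay.override_kernel_check", "overlay2.override_kernel_check":
		override, err := strconv.ParseBool(val)
		if err != nil {
			fmt.Println("bad value:", err)
			return
		}
		fmt.Println("override kernel check:", override)
	}
}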

View File

@@ -4,6 +4,7 @@ import (
"fmt"
"os"
"path/filepath"
"strings"
"github.com/containers/storage/drivers"
"github.com/containers/storage/pkg/chrootarchive"
@@ -25,13 +26,18 @@ func init() {
// This sets the home directory for the driver and returns NaiveDiffDriver.
func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) {
d := &Driver{
home: home,
homes: []string{home},
idMappings: idtools.NewIDMappingsFromMaps(uidMaps, gidMaps),
}
rootIDs := d.idMappings.RootPair()
if err := idtools.MkdirAllAndChown(home, 0700, rootIDs); err != nil {
return nil, err
}
for _, option := range options {
if strings.HasPrefix(option, "vfs.imagestore=") {
d.homes = append(d.homes, strings.Split(option[15:], ",")...)
}
}
return graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps), nil
}
@@ -40,7 +46,7 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap
// In order to support layering, files are copied from the parent layer into the new layer. There is no copy-on-write support.
// Driver must be wrapped in NaiveDiffDriver to be used as a graphdriver.Driver
type Driver struct {
home string
homes []string
idMappings *idtools.IDMappings
}
@@ -98,7 +104,17 @@ func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error {
}
func (d *Driver) dir(id string) string {
return filepath.Join(d.home, "dir", filepath.Base(id))
for i, home := range d.homes {
if i > 0 {
home = filepath.Join(home, d.String())
}
candidate := filepath.Join(home, "dir", filepath.Base(id))
fi, err := os.Stat(candidate)
if err == nil && fi.IsDir() {
return candidate
}
}
return filepath.Join(d.homes[0], "dir", filepath.Base(id))
}
// Remove deletes the content from the directory for a given id.
@@ -132,5 +148,8 @@ func (d *Driver) Exists(id string) bool {
// AdditionalImageStores returns additional image stores supported by the driver
func (d *Driver) AdditionalImageStores() []string {
if len(d.homes) > 1 {
return d.homes[1:]
}
return nil
}
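
The vfs driver now accepts extra image-store roots through the vfs.imagestore option. A minimal sketch of the same prefix-and-split parsing; all directory paths here are made up:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// The driver's primary home, plus any additional read-only image stores
	// supplied as "vfs.imagestore=<dir>[,<dir>...]" (example paths only).
	homes := []string{"/var/lib/containers/storage/vfs"}
	options := []string{"vfs.imagestore=/usr/lib/containers/storage,/mnt/extra-store"}

	for _, option := range options {
		if strings.HasPrefix(option, "vfs.imagestore=") {
			// Everything after "vfs.imagestore=" is a comma-separated list.
			homes = append(homes, strings.Split(option[len("vfs.imagestore="):], ",")...)
		}
	}
	fmt.Println(homes)
}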

View File

@@ -49,4 +49,8 @@ var (
ErrDuplicateImageNames = errors.New("read-only image store assigns the same name to multiple images")
// ErrDuplicateLayerNames indicates that the read-only store uses the same name for multiple layers.
ErrDuplicateLayerNames = errors.New("read-only layer store assigns the same name to multiple layers")
// ErrInvalidBigDataName indicates that the name for a big data item is not acceptable; it may be empty.
ErrInvalidBigDataName = errors.New("not a valid name for a big data item")
// ErrDigestUnknown indicates that we were unable to compute the digest of a specified item.
ErrDigestUnknown = errors.New("could not compute digest of item")
)
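
The new sentinels are returned wrapped via github.com/pkg/errors, so callers are expected to compare against the cause rather than the wrapped error. A minimal sketch of that pattern; the local sentinel and message merely mirror the ones defined above:

package main

import (
	"fmt"

	"github.com/pkg/errors"
)

// ErrInvalidBigDataName stands in for the sentinel defined above.
var ErrInvalidBigDataName = errors.New("not a valid name for a big data item")

func bigData(key string) error {
	if key == "" {
		// Wrapf keeps the sentinel as the cause while adding context.
		return errors.Wrapf(ErrInvalidBigDataName, "can't retrieve big data value for empty name")
	}
	return nil
}

func main() {
	err := bigData("")
	// errors.Cause unwraps back to the original sentinel for comparison.
	if errors.Cause(err) == ErrInvalidBigDataName {
		fmt.Println("caller asked for big data with an empty name:", err)
	}
}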

View File

@@ -10,6 +10,7 @@ import (
"github.com/containers/storage/pkg/ioutils"
"github.com/containers/storage/pkg/stringid"
"github.com/containers/storage/pkg/truncindex"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
)
@@ -42,6 +43,10 @@ type Image struct {
// that has been stored, if they're known.
BigDataSizes map[string]int64 `json:"big-data-sizes,omitempty"`
// BigDataDigests maps the names in BigDataNames to the digests of the
// data that has been stored, if they're known.
BigDataDigests map[string]digest.Digest `json:"big-data-digests,omitempty"`
// Created is the datestamp for when this image was created. Older
// versions of the library did not track this information, so callers
// will likely want to use the IsZero() method to verify that a value
@@ -136,6 +141,7 @@ func (r *imageStore) Load() error {
ids := make(map[string]*Image)
names := make(map[string]*Image)
if err = json.Unmarshal(data, &images); len(data) == 0 || err == nil {
idlist = make([]string, 0, len(images))
for n, image := range images {
ids[image.ID] = images[n]
idlist = append(idlist, image.ID)
@@ -252,6 +258,9 @@ func (r *imageStore) SetFlag(id string, flag string, value interface{}) error {
if !ok {
return ErrImageUnknown
}
if image.Flags == nil {
image.Flags = make(map[string]interface{})
}
image.Flags[flag] = value
return r.Save()
}
@@ -282,14 +291,15 @@ func (r *imageStore) Create(id string, names []string, layer, metadata string, c
}
if err == nil {
image = &Image{
ID: id,
Names: names,
TopLayer: layer,
Metadata: metadata,
BigDataNames: []string{},
BigDataSizes: make(map[string]int64),
Created: created,
Flags: make(map[string]interface{}),
ID: id,
Names: names,
TopLayer: layer,
Metadata: metadata,
BigDataNames: []string{},
BigDataSizes: make(map[string]int64),
BigDataDigests: make(map[string]digest.Digest),
Created: created,
Flags: make(map[string]interface{}),
}
r.images = append(r.images, image)
r.idindex.Add(id)
@@ -402,6 +412,9 @@ func (r *imageStore) Exists(id string) bool {
}
func (r *imageStore) BigData(id, key string) ([]byte, error) {
if key == "" {
return nil, errors.Wrapf(ErrInvalidBigDataName, "can't retrieve image big data value for empty name")
}
image, ok := r.lookup(id)
if !ok {
return nil, ErrImageUnknown
@@ -410,16 +423,61 @@ func (r *imageStore) BigData(id, key string) ([]byte, error) {
}
func (r *imageStore) BigDataSize(id, key string) (int64, error) {
if key == "" {
return -1, errors.Wrapf(ErrInvalidBigDataName, "can't retrieve size of image big data with empty name")
}
image, ok := r.lookup(id)
if !ok {
return -1, ErrImageUnknown
}
if image.BigDataSizes == nil {
image.BigDataSizes = make(map[string]int64)
}
if size, ok := image.BigDataSizes[key]; ok {
return size, nil
}
if data, err := r.BigData(id, key); err == nil && data != nil {
if r.SetBigData(id, key, data) == nil {
image, ok := r.lookup(id)
if !ok {
return -1, ErrImageUnknown
}
if size, ok := image.BigDataSizes[key]; ok {
return size, nil
}
}
}
return -1, ErrSizeUnknown
}
func (r *imageStore) BigDataDigest(id, key string) (digest.Digest, error) {
if key == "" {
return "", errors.Wrapf(ErrInvalidBigDataName, "can't retrieve digest of image big data value with empty name")
}
image, ok := r.lookup(id)
if !ok {
return "", ErrImageUnknown
}
if image.BigDataDigests == nil {
image.BigDataDigests = make(map[string]digest.Digest)
}
if d, ok := image.BigDataDigests[key]; ok {
return d, nil
}
if data, err := r.BigData(id, key); err == nil && data != nil {
if r.SetBigData(id, key, data) == nil {
image, ok := r.lookup(id)
if !ok {
return "", ErrImageUnknown
}
if d, ok := image.BigDataDigests[key]; ok {
return d, nil
}
}
}
return "", ErrDigestUnknown
}
func (r *imageStore) BigDataNames(id string) ([]string, error) {
image, ok := r.lookup(id)
if !ok {
@@ -429,6 +487,9 @@ func (r *imageStore) BigDataNames(id string) ([]string, error) {
}
func (r *imageStore) SetBigData(id, key string, data []byte) error {
if key == "" {
return errors.Wrapf(ErrInvalidBigDataName, "can't set empty name for image big data item")
}
if !r.IsReadWrite() {
return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to save data items associated with images at %q", r.imagespath())
}
@@ -441,20 +502,29 @@ func (r *imageStore) SetBigData(id, key string, data []byte) error {
}
err := ioutils.AtomicWriteFile(r.datapath(image.ID, key), data, 0600)
if err == nil {
add := true
save := false
oldSize, ok := image.BigDataSizes[key]
if image.BigDataSizes == nil {
image.BigDataSizes = make(map[string]int64)
}
oldSize, sizeOk := image.BigDataSizes[key]
image.BigDataSizes[key] = int64(len(data))
if !ok || oldSize != image.BigDataSizes[key] {
if image.BigDataDigests == nil {
image.BigDataDigests = make(map[string]digest.Digest)
}
oldDigest, digestOk := image.BigDataDigests[key]
newDigest := digest.Canonical.FromBytes(data)
image.BigDataDigests[key] = newDigest
if !sizeOk || oldSize != image.BigDataSizes[key] || !digestOk || oldDigest != newDigest {
save = true
}
addName := true
for _, name := range image.BigDataNames {
if name == key {
add = false
addName = false
break
}
}
if add {
if addName {
image.BigDataNames = append(image.BigDataNames, key)
save = true
}
@@ -469,7 +539,7 @@ func (r *imageStore) Wipe() error {
if !r.IsReadWrite() {
return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to delete images at %q", r.imagespath())
}
ids := []string{}
ids := make([]string, 0, len(r.byid))
for id := range r.byid {
ids = append(ids, id)
}
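
The nil-map guards added around BigDataSizes and BigDataDigests exist because records written by older versions of the library omit those JSON keys, and encoding/json leaves absent maps nil. A small stand-alone illustration (the struct, key names, and digest string are simplified stand-ins):

package main

import (
	"encoding/json"
	"fmt"
)

type record struct {
	ID             string            `json:"id"`
	BigDataSizes   map[string]int64  `json:"big-data-sizes,omitempty"`
	BigDataDigests map[string]string `json:"big-data-digests,omitempty"`
}

func main() {
	// A record saved before these fields existed: the keys are absent.
	old := []byte(`{"id":"abc123"}`)

	var r record
	if err := json.Unmarshal(old, &r); err != nil {
		panic(err)
	}
	fmt.Println(r.BigDataSizes == nil, r.BigDataDigests == nil) // true true

	// Writing to a nil map panics, hence the lazy make(...) guards above.
	if r.BigDataDigests == nil {
		r.BigDataDigests = make(map[string]string)
	}
	r.BigDataDigests["manifest"] = "sha256:…" // hypothetical key and value
	fmt.Println(r.BigDataDigests)
}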

vendor/github.com/containers/storage/images_ffjson.go generated vendored Normal file (+1148)

File diff suppressed because it is too large

View File

@@ -254,6 +254,7 @@ func (r *layerStore) Load() error {
compressedsums := make(map[digest.Digest][]string)
uncompressedsums := make(map[digest.Digest][]string)
if err = json.Unmarshal(data, &layers); len(data) == 0 || err == nil {
idlist = make([]string, 0, len(layers))
for n, layer := range layers {
ids[layer.ID] = layers[n]
idlist = append(idlist, layer.ID)
@@ -305,6 +306,9 @@ func (r *layerStore) Load() error {
// actually delete.
if r.IsReadWrite() {
for _, layer := range r.layers {
if layer.Flags == nil {
layer.Flags = make(map[string]interface{})
}
if cleanup, ok := layer.Flags[incompleteFlag]; ok {
if b, ok := cleanup.(bool); ok && b {
err = r.Delete(layer.ID)
@@ -338,7 +342,7 @@ func (r *layerStore) Save() error {
if err := os.MkdirAll(filepath.Dir(mpath), 0700); err != nil {
return err
}
mounts := []layerMountPoint{}
mounts := make([]layerMountPoint, 0, len(r.layers))
for _, layer := range r.layers {
if layer.MountPoint != "" && layer.MountCount > 0 {
mounts = append(mounts, layerMountPoint{
@@ -455,6 +459,9 @@ func (r *layerStore) SetFlag(id string, flag string, value interface{}) error {
if !ok {
return ErrLayerUnknown
}
if layer.Flags == nil {
layer.Flags = make(map[string]interface{})
}
layer.Flags[flag] = value
return r.Save()
}
@@ -733,7 +740,7 @@ func (r *layerStore) Wipe() error {
if !r.IsReadWrite() {
return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to delete layers at %q", r.layerspath())
}
ids := []string{}
ids := make([]string, 0, len(r.byid))
for id := range r.byid {
ids = append(ids, id)
}
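
The incompleteFlag handling above reads a value out of a map[string]interface{} populated from JSON, where a JSON boolean decodes as a Go bool. A tiny sketch of that decode-and-assert pattern; the literal flag name used here is only illustrative:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Flags round-trip through JSON as map[string]interface{};
	// a JSON true/false comes back as a Go bool.
	var flags map[string]interface{}
	_ = json.Unmarshal([]byte(`{"incomplete":true}`), &flags)

	if cleanup, ok := flags["incomplete"]; ok {
		if b, ok := cleanup.(bool); ok && b {
			fmt.Println("layer is marked incomplete; it would be deleted on Load")
		}
	}
}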

vendor/github.com/containers/storage/layers_ffjson.go generated vendored Normal file (+1713)

File diff suppressed because it is too large

View File

@@ -20,7 +20,7 @@ import (
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/ioutils"
"github.com/containers/storage/pkg/stringid"
"github.com/opencontainers/go-digest"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
)
@@ -87,6 +87,10 @@ type ROBigDataStore interface {
// data associated with this ID, if it has previously been set.
BigDataSize(id, key string) (int64, error)
// BigDataDigest retrieves the digest of a (potentially large) piece of
// data associated with this ID, if it has previously been set.
BigDataDigest(id, key string) (digest.Digest, error)
// BigDataNames() returns a list of the names of previously-stored pieces of
// data.
BigDataNames(id string) ([]string, error)
@@ -327,6 +331,10 @@ type Store interface {
// of named data associated with an image.
ImageBigDataSize(id, key string) (int64, error)
// ImageBigDataDigest retrieves the digest of a (possibly large) chunk
// of named data associated with an image.
ImageBigDataDigest(id, key string) (digest.Digest, error)
// SetImageBigData stores a (possibly large) chunk of named data associated
// with an image.
SetImageBigData(id, key string, data []byte) error
@@ -343,6 +351,10 @@ type Store interface {
// chunk of named data associated with a container.
ContainerBigDataSize(id, key string) (int64, error)
// ContainerBigDataDigest retrieves the digest of a (possibly large)
// chunk of named data associated with a container.
ContainerBigDataDigest(id, key string) (digest.Digest, error)
// SetContainerBigData stores a (possibly large) chunk of named data
// associated with a container.
SetContainerBigData(id, key string, data []byte) error
@@ -728,11 +740,14 @@ func (s *store) PutLayer(id, parent string, names []string, mountLabel string, w
if err != nil {
return nil, -1, err
}
rlstores, err := s.ROLayerStores()
if err != nil {
return nil, -1, err
}
rcstore, err := s.ContainerStore()
if err != nil {
return nil, -1, err
}
rlstore.Lock()
defer rlstore.Unlock()
if modified, err := rlstore.Modified(); modified || err != nil {
@@ -747,9 +762,15 @@ func (s *store) PutLayer(id, parent string, names []string, mountLabel string, w
id = stringid.GenerateRandomID()
}
if parent != "" {
if l, err := rlstore.Get(parent); err == nil && l != nil {
parent = l.ID
} else {
var ilayer *Layer
for _, lstore := range append([]ROLayerStore{rlstore}, rlstores...) {
if l, err := lstore.Get(parent); err == nil && l != nil {
ilayer = l
parent = ilayer.ID
break
}
}
if ilayer == nil {
return nil, -1, ErrLayerUnknown
}
containers, err := rcstore.Containers()
@@ -1026,6 +1047,30 @@ func (s *store) ImageBigDataSize(id, key string) (int64, error) {
return -1, ErrSizeUnknown
}
func (s *store) ImageBigDataDigest(id, key string) (digest.Digest, error) {
ristore, err := s.ImageStore()
if err != nil {
return "", err
}
stores, err := s.ROImageStores()
if err != nil {
return "", err
}
stores = append([]ROImageStore{ristore}, stores...)
for _, ristore := range stores {
ristore.Lock()
defer ristore.Unlock()
if modified, err := ristore.Modified(); modified || err != nil {
ristore.Load()
}
d, err := ristore.BigDataDigest(id, key)
if err == nil && d.Validate() == nil {
return d, nil
}
}
return "", ErrDigestUnknown
}
func (s *store) ImageBigData(id, key string) ([]byte, error) {
istore, err := s.ImageStore()
if err != nil {
@@ -1089,10 +1134,22 @@ func (s *store) ContainerBigDataSize(id, key string) (int64, error) {
if modified, err := rcstore.Modified(); modified || err != nil {
rcstore.Load()
}
return rcstore.BigDataSize(id, key)
}
func (s *store) ContainerBigDataDigest(id, key string) (digest.Digest, error) {
rcstore, err := s.ContainerStore()
if err != nil {
return "", err
}
rcstore.Lock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
rcstore.Load()
}
return rcstore.BigDataDigest(id, key)
}
func (s *store) ContainerBigData(id, key string) ([]byte, error) {
rcstore, err := s.ContainerStore()
if err != nil {
@@ -1103,7 +1160,6 @@ func (s *store) ContainerBigData(id, key string) ([]byte, error) {
if modified, err := rcstore.Modified(); modified || err != nil {
rcstore.Load()
}
return rcstore.BigData(id, key)
}
@@ -1117,7 +1173,6 @@ func (s *store) SetContainerBigData(id, key string, data []byte) error {
if modified, err := rcstore.Modified(); modified || err != nil {
rcstore.Load()
}
return rcstore.SetBigData(id, key, data)
}
@@ -1841,10 +1896,16 @@ func (s *store) layersByMappedDigest(m func(ROLayerStore, digest.Digest) ([]Laye
}
func (s *store) LayersByCompressedDigest(d digest.Digest) ([]Layer, error) {
if err := d.Validate(); err != nil {
return nil, errors.Wrapf(err, "error looking for compressed layers matching digest %q", d)
}
return s.layersByMappedDigest(func(r ROLayerStore, d digest.Digest) ([]Layer, error) { return r.LayersByCompressedDigest(d) }, d)
}
func (s *store) LayersByUncompressedDigest(d digest.Digest) ([]Layer, error) {
if err := d.Validate(); err != nil {
return nil, errors.Wrapf(err, "error looking for layers matching digest %q", d)
}
return s.layersByMappedDigest(func(r ROLayerStore, d digest.Digest) ([]Layer, error) { return r.LayersByUncompressedDigest(d) }, d)
}
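
The Validate() guard added to the LayersBy*Digest lookups rejects malformed digest strings before any layer store is consulted. A short sketch of what that validation accepts and rejects; both inputs are examples:

package main

import (
	"fmt"

	digest "github.com/opencontainers/go-digest"
)

func main() {
	// A well-formed digest: algorithm prefix plus hex of the expected length.
	good := digest.Canonical.FromString("hello")
	fmt.Println(good, good.Validate()) // sha256:2cf2... <nil>

	// A malformed digest string fails Validate(), so the lookup bails out
	// early with a wrapped error instead of scanning the layer stores.
	bad := digest.Digest("sha256:not-hex")
	fmt.Println(bad.Validate()) // error
}
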
@@ -2238,7 +2299,7 @@ func makeBigDataBaseName(key string) string {
}
func stringSliceWithoutValue(slice []string, value string) []string {
modified := []string{}
modified := make([]string, 0, len(slice))
for _, v := range slice {
if v == value {
continue

View File

@@ -3,7 +3,6 @@ github.com/Microsoft/go-winio 307e919c663683a9000576fdc855acaf9534c165
github.com/Microsoft/hcsshim a8d9cc56cbce765a7eebdf4792e6ceceeff3edb8
github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76
github.com/docker/engine-api 4290f40c056686fcaa5c9caf02eac1dde9315adf
github.com/docker/go-connections 3ede32e2033de7505e6500d6c868c2b9ed9f169d
github.com/docker/go-units 0dadbb0345b35ec7ef35e228dabb8de89a65bf52
github.com/mattn/go-shellwords 753a2322a99f87c0eff284980e77f53041555bc6
github.com/mistifyio/go-zfs c0224de804d438efd11ea6e52ada8014537d6062
@@ -19,3 +18,4 @@ github.com/tchap/go-patricia v2.2.6
github.com/vbatts/tar-split bd4c5d64c3e9297f410025a3b1bd0c58f659e721
golang.org/x/net 7dcfb8076726a3fdd9353b6b8a1f1b6be6811bd6
golang.org/x/sys 07c182904dbd53199946ba614a412c61d3c548f5
github.com/pquerna/ffjson d49c2bc1aa135aad0c6f4fc2056623ec78f5d5ac