Merge pull request #608 from rhatdan/vendor

Vendor in latest containers/storage and image
Valentin Rothberg
2019-03-01 15:05:56 +01:00
committed by GitHub
9 changed files with 317 additions and 162 deletions

View File

@@ -2,13 +2,13 @@
github.com/urfave/cli v1.20.0
github.com/kr/pretty v0.1.0
github.com/kr/text v0.1.0
github.com/containers/image 22beff9696cfe608ad456c3f4a8053897153f979
github.com/containers/image v1.5
github.com/vbauerster/mpb v3.3.4
github.com/mattn/go-isatty v0.0.4
github.com/VividCortex/ewma v1.1.1
golang.org/x/sync 42b317875d0fa942474b76e1b46a6060d720ae6e
github.com/opencontainers/go-digest c9281466c8b2f606084ac71339773efd177436e7
github.com/containers/storage v1.10
github.com/containers/storage v1.11
github.com/sirupsen/logrus v1.0.0
github.com/go-check/check v1
github.com/stretchr/testify v1.1.3

View File

@@ -22,7 +22,7 @@ import (
"github.com/containers/image/transports"
"github.com/containers/image/types"
"github.com/klauspost/pgzip"
"github.com/opencontainers/go-digest"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/vbauerster/mpb"
@@ -91,8 +91,6 @@ type copier struct {
reportWriter io.Writer
progressOutput io.Writer
progressInterval time.Duration
progressPool *mpb.Progress
progressWG *sync.WaitGroup
progress chan types.ProgressProperties
blobInfoCache types.BlobInfoCache
copyInParallel bool
@@ -168,15 +166,12 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
progressOutput = ioutil.Discard
}
copyInParallel := dest.HasThreadSafePutBlob() && rawSource.HasThreadSafeGetBlob()
wg := new(sync.WaitGroup)
c := &copier{
dest: dest,
rawSource: rawSource,
reportWriter: reportWriter,
progressOutput: progressOutput,
progressInterval: options.ProgressInterval,
progressPool: mpb.New(mpb.WithWidth(40), mpb.WithOutput(progressOutput), mpb.WithWaitGroup(wg)),
progressWG: wg,
progress: options.Progress,
copyInParallel: copyInParallel,
// FIXME? The cache is used for sources and destinations equally, but we only have a SourceCtx and DestinationCtx.
@@ -428,11 +423,6 @@ func (ic *imageCopier) updateEmbeddedDockerReference() error {
return nil
}
// shortDigest returns the first 12 characters of the digest.
func shortDigest(d digest.Digest) string {
return d.Encoded()[:12]
}
// isTTY returns true if the io.Writer is a file and a tty.
func isTTY(w io.Writer) bool {
if f, ok := w.(*os.File); ok {
@@ -496,27 +486,29 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error {
cld.destInfo, cld.diffID, cld.err = ic.copyLayer(ctx, srcLayer, bar)
}
data[index] = cld
if bar != nil {
bar.SetTotal(srcLayer.Size, true)
bar.SetTotal(srcLayer.Size, true)
}
func() { // A scope for defer
progressPool, progressCleanup := ic.c.newProgressPool(ctx)
defer progressCleanup()
progressBars := make([]*mpb.Bar, numLayers)
for i, srcInfo := range srcInfos {
progressBars[i] = ic.c.createProgressBar(progressPool, srcInfo, "blob")
}
}
progressBars := make([]*mpb.Bar, numLayers)
for i, srcInfo := range srcInfos {
progressBars[i] = ic.c.createProgressBar(srcInfo, shortDigest(srcInfo.Digest), "blob")
}
for i, srcLayer := range srcInfos {
copySemaphore.Acquire(ctx, 1)
go copyLayerHelper(i, srcLayer, progressBars[i])
}
for i, srcLayer := range srcInfos {
copySemaphore.Acquire(ctx, 1)
go copyLayerHelper(i, srcLayer, progressBars[i])
}
// Wait for all layers to be copied
copyGroup.Wait()
}()
destInfos := make([]types.BlobInfo, numLayers)
diffIDs := make([]digest.Digest, numLayers)
// Wait for all layers to be copied
copyGroup.Wait()
for i, cld := range data {
if cld.err != nil {
return cld.err
@@ -580,7 +572,6 @@ func (ic *imageCopier) copyUpdatedConfigAndManifest(ctx context.Context) ([]byte
return nil, err
}
ic.c.progressPool.Wait()
ic.c.Printf("Writing manifest to image destination\n")
if err := ic.c.dest.PutManifest(ctx, manifest); err != nil {
return nil, errors.Wrap(err, "Error writing manifest")
@@ -588,12 +579,33 @@ func (ic *imageCopier) copyUpdatedConfigAndManifest(ctx context.Context) ([]byte
return manifest, nil
}
// createProgressBar creates a mpb.Bar. Note that if the copier's reportWriter
// newProgressPool creates a *mpb.Progress and a cleanup function.
// The caller must eventually call the returned cleanup function once the pool is no longer being updated.
func (c *copier) newProgressPool(ctx context.Context) (*mpb.Progress, func()) {
ctx, cancel := context.WithCancel(ctx)
pool := mpb.New(mpb.WithWidth(40), mpb.WithOutput(c.progressOutput), mpb.WithContext(ctx))
return pool, func() {
cancel()
pool.Wait()
}
}
// createProgressBar creates a mpb.Bar in pool. Note that if the copier's reportWriter
// is ioutil.Discard, the progress bar's output will be discarded
func (c *copier) createProgressBar(info types.BlobInfo, name, kind string) *mpb.Bar {
bar := c.progressPool.AddBar(info.Size,
func (c *copier) createProgressBar(pool *mpb.Progress, info types.BlobInfo, kind string) *mpb.Bar {
// shortDigestLen is the length of the digest used for blobs.
const shortDigestLen = 12
prefix := fmt.Sprintf("Copying %s %s", kind, info.Digest.Encoded())
// Truncate the prefix (chopping off part of the digest) to keep all progress bars aligned in a column.
maxPrefixLen := len("Copying blob ") + shortDigestLen
if len(prefix) > maxPrefixLen {
prefix = prefix[:maxPrefixLen]
}
bar := pool.AddBar(info.Size,
mpb.PrependDecorators(
decor.Name(fmt.Sprintf("Copying %s %s", kind, name)),
decor.Name(prefix),
),
mpb.AppendDecorators(
decor.CountersKibiByte("%.1f / %.1f"),
@@ -614,14 +626,19 @@ func (c *copier) copyConfig(ctx context.Context, src types.Image) error {
return errors.Wrapf(err, "Error reading config blob %s", srcInfo.Digest)
}
// make the short digest only 10 characters long to make it align with the blob output
bar := c.createProgressBar(srcInfo, shortDigest(srcInfo.Digest)[0:10], "config")
destInfo, err := c.copyBlobFromStream(ctx, bytes.NewReader(configBlob), srcInfo, nil, false, true, bar)
destInfo, err := func() (types.BlobInfo, error) { // A scope for defer
progressPool, progressCleanup := c.newProgressPool(ctx)
defer progressCleanup()
bar := c.createProgressBar(progressPool, srcInfo, "config")
destInfo, err := c.copyBlobFromStream(ctx, bytes.NewReader(configBlob), srcInfo, nil, false, true, bar)
if err != nil {
return types.BlobInfo{}, err
}
bar.SetTotal(int64(len(configBlob)), true)
return destInfo, nil
}()
if err != nil {
return err
}
if bar != nil {
bar.SetTotal(0, true)
return nil
}
if destInfo.Digest != srcInfo.Digest {
return errors.Errorf("Internal error: copying uncompressed config blob %s changed digest to %s", srcInfo.Digest, destInfo.Digest)
@@ -650,7 +667,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, ba
return types.BlobInfo{}, "", errors.Wrapf(err, "Error trying to reuse blob %s at destination", srcInfo.Digest)
}
if reused {
logrus.Debugf("Skipping blob %s (already present):", shortDigest(srcInfo.Digest))
logrus.Debugf("Skipping blob %s (already present):", srcInfo.Digest)
return blobInfo, cachedDiffID, nil
}
}
@@ -752,8 +769,6 @@ func computeDiffID(stream io.Reader, decompressor compression.DecompressorFunc)
func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo,
getOriginalLayerCopyWriter func(decompressor compression.DecompressorFunc) io.Writer,
canModifyBlob bool, isConfig bool, bar *mpb.Bar) (types.BlobInfo, error) {
c.progressWG.Add(1)
defer c.progressWG.Done()
// The copying happens through a pipeline of connected io.Readers.
// === Input: srcStream
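
The copy.go changes above replace the copier-wide progress pool with a short-lived pool per operation: the pool is created when layers or a config start copying, and the returned cleanup cancels its context and waits for rendering to stop. A minimal, self-contained sketch of that pattern, using only mpb v3 calls that appear in this diff (mpb.New with WithContext, AddBar with decorators, SetTotal, cancel-then-Wait); bar.IncrBy and the sample bar prefix are assumptions for illustration, not taken from this PR:

```go
package main

import (
	"context"
	"os"
	"time"

	"github.com/vbauerster/mpb"
	"github.com/vbauerster/mpb/decor"
)

// newProgressPool mirrors the helper added in copy.go: the pool lives only for
// one copy operation, and the cleanup function cancels the pool's context and
// then waits for its rendering goroutine to finish.
func newProgressPool(ctx context.Context) (*mpb.Progress, func()) {
	ctx, cancel := context.WithCancel(ctx)
	pool := mpb.New(mpb.WithWidth(40), mpb.WithOutput(os.Stdout), mpb.WithContext(ctx))
	return pool, func() {
		cancel()
		pool.Wait()
	}
}

func main() {
	pool, cleanup := newProgressPool(context.Background())
	defer cleanup()

	// One bar per blob; copy.go truncates the prefix so the bars line up.
	bar := pool.AddBar(100,
		mpb.PrependDecorators(decor.Name("Copying blob 0123456789ab")),
		mpb.AppendDecorators(decor.CountersKibiByte("%.1f / %.1f")),
	)
	for i := 0; i < 10; i++ {
		time.Sleep(20 * time.Millisecond)
		bar.IncrBy(10) // assumed increment call; real copies drive the bar from the stream
	}
	// Mark the bar complete even if the total was only an estimate.
	bar.SetTotal(100, true)
}
```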

View File

@@ -1,6 +1,7 @@
package blobinfocache
import (
"sync"
"time"
"github.com/containers/image/types"
@@ -17,6 +18,7 @@ type locationKey struct {
// memoryCache implements an in-memory-only BlobInfoCache
type memoryCache struct {
mutex *sync.Mutex // synchronizes concurrent accesses
uncompressedDigests map[digest.Digest]digest.Digest
digestsByUncompressed map[digest.Digest]map[digest.Digest]struct{} // stores a set of digests for each uncompressed digest
knownLocations map[locationKey]map[types.BICLocationReference]time.Time // stores last known existence time for each location reference
@@ -28,6 +30,7 @@ type memoryCache struct {
// Manual users of types.{ImageSource,ImageDestination} might also use this instead of a persistent cache.
func NewMemoryCache() types.BlobInfoCache {
return &memoryCache{
mutex: new(sync.Mutex),
uncompressedDigests: map[digest.Digest]digest.Digest{},
digestsByUncompressed: map[digest.Digest]map[digest.Digest]struct{}{},
knownLocations: map[locationKey]map[types.BICLocationReference]time.Time{},
@@ -38,6 +41,15 @@ func NewMemoryCache() types.BlobInfoCache {
// May return anyDigest if it is known to be uncompressed.
// Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
func (mem *memoryCache) UncompressedDigest(anyDigest digest.Digest) digest.Digest {
mem.mutex.Lock()
defer mem.mutex.Unlock()
return mem.uncompressedDigest(anyDigest)
}
// uncompressedDigest returns an uncompressed digest corresponding to anyDigest.
// May return anyDigest if it is known to be uncompressed.
// Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
func (mem *memoryCache) uncompressedDigest(anyDigest digest.Digest) digest.Digest {
if d, ok := mem.uncompressedDigests[anyDigest]; ok {
return d
}
@@ -56,6 +68,8 @@ func (mem *memoryCache) UncompressedDigest(anyDigest digest.Digest) digest.Diges
// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
func (mem *memoryCache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) {
mem.mutex.Lock()
defer mem.mutex.Unlock()
if previous, ok := mem.uncompressedDigests[anyDigest]; ok && previous != uncompressed {
logrus.Warnf("Uncompressed digest for blob %s previously recorded as %s, now %s", anyDigest, previous, uncompressed)
}
@@ -72,6 +86,8 @@ func (mem *memoryCache) RecordDigestUncompressedPair(anyDigest digest.Digest, un
// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
// and can be reused given the opaque location data.
func (mem *memoryCache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) {
mem.mutex.Lock()
defer mem.mutex.Unlock()
key := locationKey{transport: transport.Name(), scope: scope, blobDigest: blobDigest}
locationScope, ok := mem.knownLocations[key]
if !ok {
@@ -103,11 +119,13 @@ func (mem *memoryCache) appendReplacementCandidates(candidates []candidateWithTi
// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
// uncompressed digest.
func (mem *memoryCache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
mem.mutex.Lock()
defer mem.mutex.Unlock()
res := []candidateWithTime{}
res = mem.appendReplacementCandidates(res, transport, scope, primaryDigest)
var uncompressedDigest digest.Digest // = ""
if canSubstitute {
if uncompressedDigest = mem.UncompressedDigest(primaryDigest); uncompressedDigest != "" {
if uncompressedDigest = mem.uncompressedDigest(primaryDigest); uncompressedDigest != "" {
otherDigests := mem.digestsByUncompressed[uncompressedDigest] // nil if not present in the map
for d := range otherDigests {
if d != primaryDigest && d != uncompressedDigest {
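
The memory.go hunk above adds a mutex to memoryCache and splits UncompressedDigest into an exported wrapper that takes the lock and an unexported helper that assumes the lock is already held, so CandidateLocations can reuse it without self-deadlocking on the non-reentrant mutex. A small illustrative sketch of that convention; the type and method names here are invented for the example, not taken from the package:

```go
package main

import (
	"fmt"
	"sync"
)

type cache struct {
	mutex *sync.Mutex // synchronizes all accesses to the map below
	data  map[string]string
}

// Lookup is the exported entry point: it takes the lock and delegates.
func (c *cache) Lookup(key string) string {
	c.mutex.Lock()
	defer c.mutex.Unlock()
	return c.lookup(key)
}

// lookup assumes the caller already holds c.mutex.
func (c *cache) lookup(key string) string {
	return c.data[key]
}

// Record needs the current value while holding the lock, so it must call the
// unexported lookup rather than Lookup, which would try to lock again.
func (c *cache) Record(key, value string) {
	c.mutex.Lock()
	defer c.mutex.Unlock()
	if prev := c.lookup(key); prev != "" && prev != value {
		fmt.Printf("key %s previously recorded as %s, now %s\n", key, prev, value)
	}
	c.data[key] = value
}

func main() {
	c := &cache{mutex: new(sync.Mutex), data: map[string]string{}}
	c.Record("a", "1")
	fmt.Println(c.Lookup("a"))
}
```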

View File

@@ -11,7 +11,7 @@ const (
VersionPatch = 5
// VersionDev indicates development branch. Releases will be empty string.
VersionDev = "-dev"
VersionDev = ""
)
// Version is the specification version that the package types support.

View File

@@ -272,7 +272,7 @@ func (r *imageStore) Load() error {
}
}
}
if shouldSave && !r.IsReadWrite() {
if shouldSave && (!r.IsReadWrite() || !r.Locked()) {
return ErrDuplicateImageNames
}
r.images = images
@@ -291,7 +291,7 @@ func (r *imageStore) Save() error {
return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify the image store at %q", r.imagespath())
}
if !r.Locked() {
return errors.New("image store is not locked")
return errors.New("image store is not locked for writing")
}
rpath := r.imagespath()
if err := os.MkdirAll(filepath.Dir(rpath), 0700); err != nil {

View File

@@ -229,6 +229,7 @@ type LayerStore interface {
type layerStore struct {
lockfile Locker
mountsLockfile Locker
rundir string
driver drivers.Driver
layerdir string
@@ -291,7 +292,6 @@ func (r *layerStore) Load() error {
idlist := []string{}
ids := make(map[string]*Layer)
names := make(map[string]*Layer)
mounts := make(map[string]*Layer)
compressedsums := make(map[digest.Digest][]string)
uncompressedsums := make(map[digest.Digest][]string)
if r.lockfile.IsReadWrite() {
@@ -319,39 +319,29 @@ func (r *layerStore) Load() error {
label.ReserveLabel(layer.MountLabel)
}
}
err = nil
}
if shouldSave && !r.IsReadWrite() {
if shouldSave && (!r.IsReadWrite() || !r.Locked()) {
return ErrDuplicateLayerNames
}
mpath := r.mountspath()
data, err = ioutil.ReadFile(mpath)
if err != nil && !os.IsNotExist(err) {
return err
}
layerMounts := []layerMountPoint{}
if err = json.Unmarshal(data, &layerMounts); len(data) == 0 || err == nil {
for _, mount := range layerMounts {
if mount.MountPoint != "" {
if layer, ok := ids[mount.ID]; ok {
mounts[mount.MountPoint] = layer
layer.MountPoint = mount.MountPoint
layer.MountCount = mount.MountCount
}
}
}
}
r.layers = layers
r.idindex = truncindex.NewTruncIndex(idlist)
r.byid = ids
r.byname = names
r.bymount = mounts
r.bycompressedsum = compressedsums
r.byuncompressedsum = uncompressedsums
err = nil
// Load and merge information about which layers are mounted, and where.
if r.IsReadWrite() {
r.mountsLockfile.RLock()
defer r.mountsLockfile.Unlock()
if err = r.loadMounts(); err != nil {
return err
}
}
// Last step: if we're writable, try to remove anything that a previous
// user of this storage area marked for deletion but didn't manage to
// actually delete.
if r.IsReadWrite() {
if r.IsReadWrite() && r.Locked() {
for _, layer := range r.layers {
if layer.Flags == nil {
layer.Flags = make(map[string]interface{})
@@ -373,12 +363,36 @@ func (r *layerStore) Load() error {
return err
}
func (r *layerStore) loadMounts() error {
mounts := make(map[string]*Layer)
mpath := r.mountspath()
data, err := ioutil.ReadFile(mpath)
if err != nil && !os.IsNotExist(err) {
return err
}
layerMounts := []layerMountPoint{}
if err = json.Unmarshal(data, &layerMounts); len(data) == 0 || err == nil {
for _, mount := range layerMounts {
if mount.MountPoint != "" {
if layer, ok := r.lookup(mount.ID); ok {
mounts[mount.MountPoint] = layer
layer.MountPoint = mount.MountPoint
layer.MountCount = mount.MountCount
}
}
}
err = nil
}
r.bymount = mounts
return err
}
func (r *layerStore) Save() error {
if !r.IsReadWrite() {
return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify the layer store at %q", r.layerspath())
}
if !r.Locked() {
return errors.New("layer store is not locked")
return errors.New("layer store is not locked for writing")
}
rpath := r.layerspath()
if err := os.MkdirAll(filepath.Dir(rpath), 0700); err != nil {
@@ -388,6 +402,25 @@ func (r *layerStore) Save() error {
if err != nil {
return err
}
if err := ioutils.AtomicWriteFile(rpath, jldata, 0600); err != nil {
return err
}
if !r.IsReadWrite() {
return nil
}
r.mountsLockfile.Lock()
defer r.mountsLockfile.Unlock()
defer r.mountsLockfile.Touch()
return r.saveMounts()
}
func (r *layerStore) saveMounts() error {
if !r.IsReadWrite() {
return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify the layer store at %q", r.layerspath())
}
if !r.mountsLockfile.Locked() {
return errors.New("layer store mount information is not locked for writing")
}
mpath := r.mountspath()
if err := os.MkdirAll(filepath.Dir(mpath), 0700); err != nil {
return err
@@ -406,11 +439,10 @@ func (r *layerStore) Save() error {
if err != nil {
return err
}
if err := ioutils.AtomicWriteFile(rpath, jldata, 0600); err != nil {
if err = ioutils.AtomicWriteFile(mpath, jmdata, 0600); err != nil {
return err
}
defer r.Touch()
return ioutils.AtomicWriteFile(mpath, jmdata, 0600)
return r.loadMounts()
}
func newLayerStore(rundir string, layerdir string, driver drivers.Driver, uidMap, gidMap []idtools.IDMap) (LayerStore, error) {
@@ -426,16 +458,21 @@ func newLayerStore(rundir string, layerdir string, driver drivers.Driver, uidMap
}
lockfile.Lock()
defer lockfile.Unlock()
mountsLockfile, err := GetLockfile(filepath.Join(rundir, "mountpoints.lock"))
if err != nil {
return nil, err
}
rlstore := layerStore{
lockfile: lockfile,
driver: driver,
rundir: rundir,
layerdir: layerdir,
byid: make(map[string]*Layer),
bymount: make(map[string]*Layer),
byname: make(map[string]*Layer),
uidMap: copyIDMap(uidMap),
gidMap: copyIDMap(gidMap),
lockfile: lockfile,
mountsLockfile: mountsLockfile,
driver: driver,
rundir: rundir,
layerdir: layerdir,
byid: make(map[string]*Layer),
bymount: make(map[string]*Layer),
byname: make(map[string]*Layer),
uidMap: copyIDMap(uidMap),
gidMap: copyIDMap(gidMap),
}
if err := rlstore.Load(); err != nil {
return nil, err
@@ -451,13 +488,14 @@ func newROLayerStore(rundir string, layerdir string, driver drivers.Driver) (ROL
lockfile.Lock()
defer lockfile.Unlock()
rlstore := layerStore{
lockfile: lockfile,
driver: driver,
rundir: rundir,
layerdir: layerdir,
byid: make(map[string]*Layer),
bymount: make(map[string]*Layer),
byname: make(map[string]*Layer),
lockfile: lockfile,
mountsLockfile: nil,
driver: driver,
rundir: rundir,
layerdir: layerdir,
byid: make(map[string]*Layer),
bymount: make(map[string]*Layer),
byname: make(map[string]*Layer),
}
if err := rlstore.Load(); err != nil {
return nil, err
@@ -673,6 +711,16 @@ func (r *layerStore) Create(id string, parent *Layer, names []string, mountLabel
}
func (r *layerStore) Mounted(id string) (int, error) {
if !r.IsReadWrite() {
return 0, errors.Wrapf(ErrStoreIsReadOnly, "no mount information for layers at %q", r.mountspath())
}
r.mountsLockfile.RLock()
defer r.mountsLockfile.Unlock()
if modified, err := r.mountsLockfile.Modified(); modified || err != nil {
if err = r.loadMounts(); err != nil {
return 0, err
}
}
layer, ok := r.lookup(id)
if !ok {
return 0, ErrLayerUnknown
@@ -684,13 +732,21 @@ func (r *layerStore) Mount(id string, options drivers.MountOpts) (string, error)
if !r.IsReadWrite() {
return "", errors.Wrapf(ErrStoreIsReadOnly, "not allowed to update mount locations for layers at %q", r.mountspath())
}
r.mountsLockfile.Lock()
defer r.mountsLockfile.Unlock()
if modified, err := r.mountsLockfile.Modified(); modified || err != nil {
if err = r.loadMounts(); err != nil {
return "", err
}
}
defer r.mountsLockfile.Touch()
layer, ok := r.lookup(id)
if !ok {
return "", ErrLayerUnknown
}
if layer.MountCount > 0 {
layer.MountCount++
return layer.MountPoint, r.Save()
return layer.MountPoint, r.saveMounts()
}
if options.MountLabel == "" {
options.MountLabel = layer.MountLabel
@@ -709,7 +765,7 @@ func (r *layerStore) Mount(id string, options drivers.MountOpts) (string, error)
layer.MountPoint = filepath.Clean(mountpoint)
layer.MountCount++
r.bymount[layer.MountPoint] = layer
err = r.Save()
err = r.saveMounts()
}
return mountpoint, err
}
@@ -718,6 +774,14 @@ func (r *layerStore) Unmount(id string, force bool) (bool, error) {
if !r.IsReadWrite() {
return false, errors.Wrapf(ErrStoreIsReadOnly, "not allowed to update mount locations for layers at %q", r.mountspath())
}
r.mountsLockfile.Lock()
defer r.mountsLockfile.Unlock()
if modified, err := r.mountsLockfile.Modified(); modified || err != nil {
if err = r.loadMounts(); err != nil {
return false, err
}
}
defer r.mountsLockfile.Touch()
layer, ok := r.lookup(id)
if !ok {
layerByMount, ok := r.bymount[filepath.Clean(id)]
@@ -731,7 +795,7 @@ func (r *layerStore) Unmount(id string, force bool) (bool, error) {
}
if layer.MountCount > 1 {
layer.MountCount--
return true, r.Save()
return true, r.saveMounts()
}
err := r.driver.Put(id)
if err == nil || os.IsNotExist(err) {
@@ -740,12 +804,22 @@ func (r *layerStore) Unmount(id string, force bool) (bool, error) {
}
layer.MountCount--
layer.MountPoint = ""
return false, r.Save()
return false, r.saveMounts()
}
return true, err
}
func (r *layerStore) ParentOwners(id string) (uids, gids []int, err error) {
if !r.IsReadWrite() {
return nil, nil, errors.Wrapf(ErrStoreIsReadOnly, "no mount information for layers at %q", r.mountspath())
}
r.mountsLockfile.RLock()
defer r.mountsLockfile.Unlock()
if modified, err := r.mountsLockfile.Modified(); modified || err != nil {
if err = r.loadMounts(); err != nil {
return nil, nil, err
}
}
layer, ok := r.lookup(id)
if !ok {
return nil, nil, ErrLayerUnknown
@@ -862,14 +936,23 @@ func (r *layerStore) Delete(id string) error {
return ErrLayerUnknown
}
id = layer.ID
// This check is needed for idempotency of delete where the layer could have been
// already unmounted (since c/storage gives you that API directly)
for layer.MountCount > 0 {
// The layer may already have been explicitly unmounted, but if not, we
// should try to clean that up before we start deleting anything at the
// driver level.
mountCount, err := r.Mounted(id)
if err != nil {
return errors.Wrapf(err, "error checking if layer %q is still mounted", id)
}
for mountCount > 0 {
if _, err := r.Unmount(id, false); err != nil {
return err
}
mountCount, err = r.Mounted(id)
if err != nil {
return errors.Wrapf(err, "error checking if layer %q is still mounted", id)
}
}
err := r.driver.Remove(id)
err = r.driver.Remove(id)
if err == nil {
os.Remove(r.tspath(id))
delete(r.byid, id)
@@ -1235,7 +1318,20 @@ func (r *layerStore) Touch() error {
}
func (r *layerStore) Modified() (bool, error) {
return r.lockfile.Modified()
var mmodified bool
lmodified, err := r.lockfile.Modified()
if err != nil {
return lmodified, err
}
if r.IsReadWrite() {
r.mountsLockfile.RLock()
defer r.mountsLockfile.Unlock()
mmodified, err = r.mountsLockfile.Modified()
if err != nil {
return lmodified, err
}
}
return lmodified || mmodified, nil
}
func (r *layerStore) IsReadWrite() bool {
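
The layers.go changes above move mount state into mountpoints.json under its own lock file in the run directory: writers take the lock, reload if another process changed the file, save, and Touch() the lock; readers take a shared RLock and re-read only when Modified() reports a change. A rough sketch of that discipline against the public storage.GetLockfile/Locker API; the run directory path and the placeholder messages are invented for the example, and the Locker methods are assumed to match this vendored version:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"

	"github.com/containers/storage"
)

func main() {
	// Hypothetical run directory for the sketch.
	rundir := filepath.Join(os.TempDir(), "storage-run")
	if err := os.MkdirAll(rundir, 0700); err != nil {
		panic(err)
	}
	mountsLock, err := storage.GetLockfile(filepath.Join(rundir, "mountpoints.lock"))
	if err != nil {
		panic(err)
	}

	// Writer path (Mount/Unmount/saveMounts): take the write lock, reload if
	// another process changed the file, update it, then Touch() the lock so
	// other processes see the change.
	mountsLock.Lock()
	if modified, err := mountsLock.Modified(); modified || err != nil {
		fmt.Println("reloading mountpoints.json before modifying it")
	}
	// ... update the in-memory mount counts and rewrite mountpoints.json here ...
	mountsLock.Touch()
	mountsLock.Unlock()

	// Reader path (Mounted/ParentOwners): a shared read lock is enough, and the
	// file is re-read only when another writer touched the lock since last time.
	mountsLock.RLock()
	if modified, err := mountsLock.Modified(); modified || err != nil {
		fmt.Println("reloading mountpoints.json before reading it")
	}
	mountsLock.Unlock()
}
```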

View File

@@ -35,7 +35,7 @@ type Locker interface {
// IsReadWrite() checks if the lock file is read-write
IsReadWrite() bool
// Locked() checks if lock is locked
// Locked() checks if lock is locked for writing by a thread in this process
Locked() bool
}
@@ -66,7 +66,10 @@ func getLockfile(path string, ro bool) (Locker, error) {
if lockfiles == nil {
lockfiles = make(map[string]Locker)
}
cleanPath := filepath.Clean(path)
cleanPath, err := filepath.Abs(path)
if err != nil {
return nil, errors.Wrapf(err, "error ensuring that path %q is an absolute path", path)
}
if locker, ok := lockfiles[cleanPath]; ok {
if ro && locker.IsReadWrite() {
return nil, errors.Errorf("lock %q is not a read-only lock", cleanPath)
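
The getLockfile change above keys the per-process lock-file map by filepath.Abs instead of filepath.Clean, so different spellings of the same path can no longer produce two independent Locker objects for one file. A tiny sketch of the difference; the file names are examples only:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	cwd, err := os.Getwd()
	if err != nil {
		panic(err)
	}
	rel := "run.lock"                     // relative spelling
	abs := filepath.Join(cwd, "run.lock") // absolute spelling of the same file

	// filepath.Clean leaves the relative spelling relative, so the two spellings
	// would be distinct map keys even though they name the same file.
	fmt.Println(filepath.Clean(rel) == filepath.Clean(abs)) // false

	// filepath.Abs resolves both to the same key, so both callers share one Locker.
	a1, err := filepath.Abs(rel)
	if err != nil {
		panic(err)
	}
	a2, err := filepath.Abs(abs)
	if err != nil {
		panic(err)
	}
	fmt.Println(a1 == a2) // true
}
```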

View File

@@ -32,7 +32,7 @@ func getLockFile(path string, ro bool) (Locker, error) {
}
return &lockfile{
stateMutex: &sync.Mutex{},
writeMutex: &sync.Mutex{},
rwMutex: &sync.RWMutex{},
file: path,
fd: uintptr(fd),
lw: stringid.GenerateRandomID(),
@@ -42,10 +42,10 @@ func getLockFile(path string, ro bool) (Locker, error) {
}
type lockfile struct {
// stateMutex is used to synchronize concurrent accesses
// rwMutex serializes concurrent reader-writer acquisitions in the same process space
rwMutex *sync.RWMutex
// stateMutex is used to synchronize concurrent accesses to the state below
stateMutex *sync.Mutex
// writeMutex is used to serialize and avoid recursive writer locks
writeMutex *sync.Mutex
counter int64
file string
fd uintptr
@@ -65,23 +65,24 @@ func (l *lockfile) lock(l_type int16) {
Len: 0,
Pid: int32(os.Getpid()),
}
if l_type == unix.F_WRLCK {
// If we try to lock as a writer, lock the writerMutex first to
// avoid multiple writer acquisitions of the same process.
// Note: it's important to lock it prior to the stateMutex to
// avoid a deadlock.
l.writeMutex.Lock()
switch l_type {
case unix.F_RDLCK:
l.rwMutex.RLock()
case unix.F_WRLCK:
l.rwMutex.Lock()
default:
panic(fmt.Sprintf("attempted to acquire a file lock of unrecognized type %d", l_type))
}
l.stateMutex.Lock()
l.locktype = l_type
if l.counter == 0 {
// Optimization: only use the (expensive) fcntl syscall when
// the counter is 0. If it's greater than that, we're owning
// the lock already and can only be a reader.
// the counter is 0. In this case, we're either the first
// reader lock or a writer lock.
for unix.FcntlFlock(l.fd, unix.F_SETLKW, &lk) != nil {
time.Sleep(10 * time.Millisecond)
}
}
l.locktype = l_type
l.locked = true
l.counter++
l.stateMutex.Unlock()
@@ -133,19 +134,28 @@ func (l *lockfile) Unlock() {
time.Sleep(10 * time.Millisecond)
}
}
if l.locktype == unix.F_WRLCK {
l.writeMutex.Unlock()
if l.locktype == unix.F_RDLCK {
l.rwMutex.RUnlock()
} else {
l.rwMutex.Unlock()
}
l.stateMutex.Unlock()
}
// Locked checks if lockfile is locked.
// Locked checks if lockfile is locked for writing by a thread in this process.
func (l *lockfile) Locked() bool {
return l.locked
l.stateMutex.Lock()
defer l.stateMutex.Unlock()
return l.locked && (l.locktype == unix.F_WRLCK)
}
// Touch updates the lock file with the UID of the user.
func (l *lockfile) Touch() error {
l.stateMutex.Lock()
if !l.locked || (l.locktype != unix.F_WRLCK) {
panic("attempted to update last-writer in lockfile without the write lock")
}
l.stateMutex.Unlock()
l.lw = stringid.GenerateRandomID()
id := []byte(l.lw)
_, err := unix.Seek(int(l.fd), 0, os.SEEK_SET)
@@ -170,6 +180,11 @@ func (l *lockfile) Touch() error {
// was loaded.
func (l *lockfile) Modified() (bool, error) {
id := []byte(l.lw)
l.stateMutex.Lock()
if !l.locked {
panic("attempted to check last-writer in lockfile without locking it first")
}
l.stateMutex.Unlock()
_, err := unix.Seek(int(l.fd), 0, os.SEEK_SET)
if err != nil {
return true, err
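
The lockfile_unix.go hunk above replaces the writer-only writeMutex with a sync.RWMutex, so in-process readers can share the lock while the expensive fcntl file lock is still taken only by the first holder. A simplified, self-contained sketch of that scheme; the fcntl call is replaced by a print statement and the field names are illustrative, not the vendored ones:

```go
package main

import (
	"fmt"
	"sync"
)

type fileLock struct {
	rwMutex    sync.RWMutex // serializes reader/writer acquisition within this process
	stateMutex sync.Mutex   // protects the counter below
	counter    int64
}

func (l *fileLock) lock(write bool) {
	if write {
		l.rwMutex.Lock()
	} else {
		l.rwMutex.RLock()
	}
	l.stateMutex.Lock()
	if l.counter == 0 {
		// Only the first holder would issue the fcntl(F_SETLKW) syscall here;
		// additional in-process readers piggyback on the already-held file lock.
		fmt.Println("acquiring file lock")
	}
	l.counter++
	l.stateMutex.Unlock()
}

func (l *fileLock) unlock(write bool) {
	l.stateMutex.Lock()
	l.counter--
	if l.counter == 0 {
		fmt.Println("releasing file lock")
	}
	l.stateMutex.Unlock()
	if write {
		l.rwMutex.Unlock()
	} else {
		l.rwMutex.RUnlock()
	}
}

func main() {
	var l fileLock
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			l.lock(false) // several readers may hold the lock at once
			l.unlock(false)
		}()
	}
	wg.Wait()
	l.lock(true) // a writer excludes every reader
	l.unlock(true)
}
```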

View File

@@ -550,10 +550,18 @@ func GetStore(options StoreOptions) (Store, error) {
}
if options.GraphRoot != "" {
options.GraphRoot = filepath.Clean(options.GraphRoot)
dir, err := filepath.Abs(options.GraphRoot)
if err != nil {
return nil, errors.Wrapf(err, "error deriving an absolute path from %q", options.GraphRoot)
}
options.GraphRoot = dir
}
if options.RunRoot != "" {
options.RunRoot = filepath.Clean(options.RunRoot)
dir, err := filepath.Abs(options.RunRoot)
if err != nil {
return nil, errors.Wrapf(err, "error deriving an absolute path from %q", options.RunRoot)
}
options.RunRoot = dir
}
storesLock.Lock()
@@ -1321,7 +1329,7 @@ func (s *store) Metadata(id string) (string, error) {
}
for _, s := range append([]ROLayerStore{lstore}, lstores...) {
store := s
store.Lock()
store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
@@ -1343,7 +1351,7 @@ func (s *store) Metadata(id string) (string, error) {
}
for _, s := range append([]ROImageStore{istore}, istores...) {
store := s
store.Lock()
store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
@@ -1359,7 +1367,7 @@ func (s *store) Metadata(id string) (string, error) {
if err != nil {
return "", err
}
cstore.Lock()
cstore.RLock()
defer cstore.Unlock()
if modified, err := cstore.Modified(); modified || err != nil {
if err = cstore.Load(); err != nil {
@@ -1383,7 +1391,7 @@ func (s *store) ListImageBigData(id string) ([]string, error) {
}
for _, s := range append([]ROImageStore{istore}, istores...) {
store := s
store.Lock()
store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
@@ -1409,7 +1417,7 @@ func (s *store) ImageBigDataSize(id, key string) (int64, error) {
}
for _, s := range append([]ROImageStore{istore}, istores...) {
store := s
store.Lock()
store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
@@ -1436,7 +1444,7 @@ func (s *store) ImageBigDataDigest(id, key string) (digest.Digest, error) {
stores = append([]ROImageStore{ristore}, stores...)
for _, r := range stores {
ristore := r
ristore.Lock()
ristore.RLock()
defer ristore.Unlock()
if modified, err := ristore.Modified(); modified || err != nil {
if err = ristore.Load(); err != nil {
@@ -1507,7 +1515,7 @@ func (s *store) ImageSize(id string) (int64, error) {
}
for _, s := range append([]ROLayerStore{lstore}, lstores...) {
store := s
store.Lock()
store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
@@ -1529,7 +1537,7 @@ func (s *store) ImageSize(id string) (int64, error) {
// Look for the image's record.
for _, s := range append([]ROImageStore{istore}, istores...) {
store := s
store.Lock()
store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
@@ -1617,7 +1625,7 @@ func (s *store) ContainerSize(id string) (int64, error) {
}
for _, s := range append([]ROLayerStore{lstore}, lstores...) {
store := s
store.Lock()
store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
@@ -1641,7 +1649,7 @@ func (s *store) ContainerSize(id string) (int64, error) {
if err != nil {
return -1, err
}
rcstore.Lock()
rcstore.RLock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
if err = rcstore.Load(); err != nil {
@@ -1705,7 +1713,7 @@ func (s *store) ListContainerBigData(id string) ([]string, error) {
return nil, err
}
rcstore.Lock()
rcstore.RLock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
if err = rcstore.Load(); err != nil {
@@ -1721,7 +1729,7 @@ func (s *store) ContainerBigDataSize(id, key string) (int64, error) {
if err != nil {
return -1, err
}
rcstore.Lock()
rcstore.RLock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
if err = rcstore.Load(); err != nil {
@@ -1736,7 +1744,7 @@ func (s *store) ContainerBigDataDigest(id, key string) (digest.Digest, error) {
if err != nil {
return "", err
}
rcstore.Lock()
rcstore.RLock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
if err = rcstore.Load(); err != nil {
@@ -1751,7 +1759,7 @@ func (s *store) ContainerBigData(id, key string) ([]byte, error) {
if err != nil {
return nil, err
}
rcstore.Lock()
rcstore.RLock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
if err = rcstore.Load(); err != nil {
@@ -1787,7 +1795,7 @@ func (s *store) Exists(id string) bool {
}
for _, s := range append([]ROLayerStore{lstore}, lstores...) {
store := s
store.Lock()
store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
@@ -1809,7 +1817,7 @@ func (s *store) Exists(id string) bool {
}
for _, s := range append([]ROImageStore{istore}, istores...) {
store := s
store.Lock()
store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
@@ -1825,7 +1833,7 @@ func (s *store) Exists(id string) bool {
if err != nil {
return false
}
rcstore.Lock()
rcstore.RLock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
if err = rcstore.Load(); err != nil {
@@ -1912,7 +1920,7 @@ func (s *store) Names(id string) ([]string, error) {
}
for _, s := range append([]ROLayerStore{lstore}, lstores...) {
store := s
store.Lock()
store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
@@ -1934,7 +1942,7 @@ func (s *store) Names(id string) ([]string, error) {
}
for _, s := range append([]ROImageStore{istore}, istores...) {
store := s
store.Lock()
store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
@@ -1950,7 +1958,7 @@ func (s *store) Names(id string) ([]string, error) {
if err != nil {
return nil, err
}
rcstore.Lock()
rcstore.RLock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
if err = rcstore.Load(); err != nil {
@@ -1974,7 +1982,7 @@ func (s *store) Lookup(name string) (string, error) {
}
for _, s := range append([]ROLayerStore{lstore}, lstores...) {
store := s
store.Lock()
store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
@@ -1996,7 +2004,7 @@ func (s *store) Lookup(name string) (string, error) {
}
for _, s := range append([]ROImageStore{istore}, istores...) {
store := s
store.Lock()
store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
@@ -2012,7 +2020,7 @@ func (s *store) Lookup(name string) (string, error) {
if err != nil {
return "", err
}
cstore.Lock()
cstore.RLock()
defer cstore.Unlock()
if modified, err := cstore.Modified(); modified || err != nil {
if err = cstore.Load(); err != nil {
@@ -2464,7 +2472,7 @@ func (s *store) Mounted(id string) (int, error) {
if err != nil {
return 0, err
}
rlstore.Lock()
rlstore.RLock()
defer rlstore.Unlock()
if modified, err := rlstore.Modified(); modified || err != nil {
if err = rlstore.Load(); err != nil {
@@ -2507,7 +2515,7 @@ func (s *store) Changes(from, to string) ([]archive.Change, error) {
}
for _, s := range append([]ROLayerStore{lstore}, lstores...) {
store := s
store.Lock()
store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
@@ -2532,7 +2540,7 @@ func (s *store) DiffSize(from, to string) (int64, error) {
}
for _, s := range append([]ROLayerStore{lstore}, lstores...) {
store := s
store.Lock()
store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
@@ -2612,7 +2620,7 @@ func (s *store) layersByMappedDigest(m func(ROLayerStore, digest.Digest) ([]Laye
}
for _, s := range append([]ROLayerStore{lstore}, lstores...) {
store := s
store.Lock()
store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
@@ -2659,7 +2667,7 @@ func (s *store) LayerSize(id string) (int64, error) {
}
for _, s := range append([]ROLayerStore{lstore}, lstores...) {
store := s
store.Lock()
store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
@@ -2678,7 +2686,7 @@ func (s *store) LayerParentOwners(id string) ([]int, []int, error) {
if err != nil {
return nil, nil, err
}
rlstore.Lock()
rlstore.RLock()
defer rlstore.Unlock()
if modified, err := rlstore.Modified(); modified || err != nil {
if err = rlstore.Load(); err != nil {
@@ -2700,14 +2708,14 @@ func (s *store) ContainerParentOwners(id string) ([]int, []int, error) {
if err != nil {
return nil, nil, err
}
rlstore.Lock()
rlstore.RLock()
defer rlstore.Unlock()
if modified, err := rlstore.Modified(); modified || err != nil {
if err = rlstore.Load(); err != nil {
return nil, nil, err
}
}
rcstore.Lock()
rcstore.RLock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
if err = rcstore.Load(); err != nil {
@@ -2738,7 +2746,7 @@ func (s *store) Layers() ([]Layer, error) {
for _, s := range append([]ROLayerStore{lstore}, lstores...) {
store := s
store.Lock()
store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
@@ -2767,7 +2775,7 @@ func (s *store) Images() ([]Image, error) {
}
for _, s := range append([]ROImageStore{istore}, istores...) {
store := s
store.Lock()
store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
@@ -2789,7 +2797,7 @@ func (s *store) Containers() ([]Container, error) {
return nil, err
}
rcstore.Lock()
rcstore.RLock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
if err = rcstore.Load(); err != nil {
@@ -2811,7 +2819,7 @@ func (s *store) Layer(id string) (*Layer, error) {
}
for _, s := range append([]ROLayerStore{lstore}, lstores...) {
store := s
store.Lock()
store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
@@ -2837,7 +2845,7 @@ func (s *store) Image(id string) (*Image, error) {
}
for _, s := range append([]ROImageStore{istore}, istores...) {
store := s
store.Lock()
store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
@@ -2870,7 +2878,7 @@ func (s *store) ImagesByTopLayer(id string) ([]*Image, error) {
}
for _, s := range append([]ROImageStore{istore}, istores...) {
store := s
store.Lock()
store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
@@ -2903,7 +2911,7 @@ func (s *store) ImagesByDigest(d digest.Digest) ([]*Image, error) {
return nil, err
}
for _, store := range append([]ROImageStore{istore}, istores...) {
store.Lock()
store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
@@ -2924,7 +2932,7 @@ func (s *store) Container(id string) (*Container, error) {
if err != nil {
return nil, err
}
rcstore.Lock()
rcstore.RLock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
if err = rcstore.Load(); err != nil {
@@ -2940,7 +2948,7 @@ func (s *store) ContainerLayerID(id string) (string, error) {
if err != nil {
return "", err
}
rcstore.Lock()
rcstore.RLock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
if err = rcstore.Load(); err != nil {
@@ -2963,7 +2971,7 @@ func (s *store) ContainerByLayer(id string) (*Container, error) {
if err != nil {
return nil, err
}
rcstore.Lock()
rcstore.RLock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
if err = rcstore.Load(); err != nil {
@@ -2988,7 +2996,7 @@ func (s *store) ContainerDirectory(id string) (string, error) {
if err != nil {
return "", err
}
rcstore.Lock()
rcstore.RLock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
if err = rcstore.Load(); err != nil {
@@ -3015,7 +3023,7 @@ func (s *store) ContainerRunDirectory(id string) (string, error) {
return "", err
}
rcstore.Lock()
rcstore.RLock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
if err = rcstore.Load(); err != nil {
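
Most of the store.go hunks above make the same change: read-only accessors now take store.RLock() instead of store.Lock(), then reload on-disk state only when Modified() reports that another process wrote it. A compact sketch of that read path against a stand-in interface; the interface and helper names are invented for the example, only the method names come from the diff:

```go
package main

import "fmt"

// roStore mirrors the subset of the read-only store interfaces used above.
type roStore interface {
	RLock()
	Unlock()
	Modified() (bool, error)
	Load() error
}

// withReadLock captures the boilerplate repeated throughout store.go: readers
// take a shared lock, so they no longer serialize each other in-process, and
// they reload only when another process modified the store on disk.
func withReadLock(s roStore, fn func() error) error {
	s.RLock()
	defer s.Unlock()
	if modified, err := s.Modified(); modified || err != nil {
		if err := s.Load(); err != nil {
			return err
		}
	}
	return fn()
}

// fakeStore is a stand-in implementation for the sketch.
type fakeStore struct{ dirty bool }

func (f *fakeStore) RLock()                  {}
func (f *fakeStore) Unlock()                 {}
func (f *fakeStore) Modified() (bool, error) { return f.dirty, nil }
func (f *fakeStore) Load() error             { f.dirty = false; return nil }

func main() {
	store := &fakeStore{dirty: true}
	_ = withReadLock(store, func() error {
		fmt.Println("reading from an up-to-date view of the store")
		return nil
	})
}
```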