Vendor in latest containers/storage and image
Signed-off-by: Daniel J Walsh <dwalsh@redhat.com>

vendor.conf
@@ -2,13 +2,13 @@
 github.com/urfave/cli v1.20.0
 github.com/kr/pretty v0.1.0
 github.com/kr/text v0.1.0
-github.com/containers/image 22beff9696cfe608ad456c3f4a8053897153f979
+github.com/containers/image v1.5
 github.com/vbauerster/mpb v3.3.4
 github.com/mattn/go-isatty v0.0.4
 github.com/VividCortex/ewma v1.1.1
 golang.org/x/sync 42b317875d0fa942474b76e1b46a6060d720ae6e
 github.com/opencontainers/go-digest c9281466c8b2f606084ac71339773efd177436e7
-github.com/containers/storage v1.10
+github.com/containers/storage v1.11
 github.com/sirupsen/logrus v1.0.0
 github.com/go-check/check v1
 github.com/stretchr/testify v1.1.3

75 vendor/github.com/containers/image/copy/copy.go generated vendored
@@ -22,7 +22,7 @@ import (
     "github.com/containers/image/transports"
     "github.com/containers/image/types"
     "github.com/klauspost/pgzip"
-    "github.com/opencontainers/go-digest"
+    digest "github.com/opencontainers/go-digest"
     "github.com/pkg/errors"
     "github.com/sirupsen/logrus"
     "github.com/vbauerster/mpb"
@@ -91,8 +91,6 @@ type copier struct {
     reportWriter     io.Writer
     progressOutput   io.Writer
     progressInterval time.Duration
-    progressPool     *mpb.Progress
-    progressWG       *sync.WaitGroup
     progress         chan types.ProgressProperties
     blobInfoCache    types.BlobInfoCache
     copyInParallel   bool
@@ -168,15 +166,12 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
         progressOutput = ioutil.Discard
     }
     copyInParallel := dest.HasThreadSafePutBlob() && rawSource.HasThreadSafeGetBlob()
-    wg := new(sync.WaitGroup)
     c := &copier{
         dest:             dest,
         rawSource:        rawSource,
         reportWriter:     reportWriter,
         progressOutput:   progressOutput,
         progressInterval: options.ProgressInterval,
-        progressPool:     mpb.New(mpb.WithWidth(40), mpb.WithOutput(progressOutput), mpb.WithWaitGroup(wg)),
-        progressWG:       wg,
         progress:         options.Progress,
         copyInParallel:   copyInParallel,
         // FIXME? The cache is used for sources and destinations equally, but we only have a SourceCtx and DestinationCtx.
@@ -428,11 +423,6 @@ func (ic *imageCopier) updateEmbeddedDockerReference() error {
     return nil
 }
 
-// shortDigest returns the first 12 characters of the digest.
-func shortDigest(d digest.Digest) string {
-    return d.Encoded()[:12]
-}
-
 // isTTY returns true if the io.Writer is a file and a tty.
 func isTTY(w io.Writer) bool {
     if f, ok := w.(*os.File); ok {
@@ -496,14 +486,16 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error {
             cld.destInfo, cld.diffID, cld.err = ic.copyLayer(ctx, srcLayer, bar)
         }
         data[index] = cld
-        if bar != nil {
-            bar.SetTotal(srcLayer.Size, true)
-        }
+        bar.SetTotal(srcLayer.Size, true)
     }
 
-    progressBars := make([]*mpb.Bar, numLayers)
-    for i, srcInfo := range srcInfos {
-        progressBars[i] = ic.c.createProgressBar(srcInfo, shortDigest(srcInfo.Digest), "blob")
-    }
+    func() { // A scope for defer
+        progressPool, progressCleanup := ic.c.newProgressPool(ctx)
+        defer progressCleanup()
+
+        progressBars := make([]*mpb.Bar, numLayers)
+        for i, srcInfo := range srcInfos {
+            progressBars[i] = ic.c.createProgressBar(progressPool, srcInfo, "blob")
+        }
 
     for i, srcLayer := range srcInfos {
@@ -511,12 +503,12 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error {
             go copyLayerHelper(i, srcLayer, progressBars[i])
         }
 
-    destInfos := make([]types.BlobInfo, numLayers)
-    diffIDs := make([]digest.Digest, numLayers)
-
         // Wait for all layers to be copied
         copyGroup.Wait()
+    }()
 
+    destInfos := make([]types.BlobInfo, numLayers)
+    diffIDs := make([]digest.Digest, numLayers)
     for i, cld := range data {
         if cld.err != nil {
             return cld.err
@@ -580,7 +572,6 @@ func (ic *imageCopier) copyUpdatedConfigAndManifest(ctx context.Context) ([]byte
         return nil, err
     }
 
-    ic.c.progressPool.Wait()
     ic.c.Printf("Writing manifest to image destination\n")
     if err := ic.c.dest.PutManifest(ctx, manifest); err != nil {
         return nil, errors.Wrap(err, "Error writing manifest")
@@ -588,12 +579,33 @@ func (ic *imageCopier) copyUpdatedConfigAndManifest(ctx context.Context) ([]byte
     return manifest, nil
 }
 
-// createProgressBar creates a mpb.Bar. Note that if the copier's reportWriter
+// newProgressPool creates a *mpb.Progress and a cleanup function.
+// The caller must eventually call the returned cleanup function after the pool will no longer be updated.
+func (c *copier) newProgressPool(ctx context.Context) (*mpb.Progress, func()) {
+    ctx, cancel := context.WithCancel(ctx)
+    pool := mpb.New(mpb.WithWidth(40), mpb.WithOutput(c.progressOutput), mpb.WithContext(ctx))
+    return pool, func() {
+        cancel()
+        pool.Wait()
+    }
+}
+
+// createProgressBar creates a mpb.Bar in pool. Note that if the copier's reportWriter
 // is ioutil.Discard, the progress bar's output will be discarded
-func (c *copier) createProgressBar(info types.BlobInfo, name, kind string) *mpb.Bar {
-    bar := c.progressPool.AddBar(info.Size,
+func (c *copier) createProgressBar(pool *mpb.Progress, info types.BlobInfo, kind string) *mpb.Bar {
+    // shortDigestLen is the length of the digest used for blobs.
+    const shortDigestLen = 12
+
+    prefix := fmt.Sprintf("Copying %s %s", kind, info.Digest.Encoded())
+    // Truncate the prefix (chopping of some part of the digest) to make all progress bars aligned in a column.
+    maxPrefixLen := len("Copying blob ") + shortDigestLen
+    if len(prefix) > maxPrefixLen {
+        prefix = prefix[:maxPrefixLen]
+    }
+
+    bar := pool.AddBar(info.Size,
         mpb.PrependDecorators(
-            decor.Name(fmt.Sprintf("Copying %s %s", kind, name)),
+            decor.Name(prefix),
         ),
         mpb.AppendDecorators(
             decor.CountersKibiByte("%.1f / %.1f"),
@@ -614,14 +626,19 @@ func (c *copier) copyConfig(ctx context.Context, src types.Image) error {
             return errors.Wrapf(err, "Error reading config blob %s", srcInfo.Digest)
         }
 
-        // make the short digest only 10 characters long to make it align with the blob output
-        bar := c.createProgressBar(srcInfo, shortDigest(srcInfo.Digest)[0:10], "config")
-        destInfo, err := c.copyBlobFromStream(ctx, bytes.NewReader(configBlob), srcInfo, nil, false, true, bar)
-        if err != nil {
-            return err
-        }
-        if bar != nil {
-            bar.SetTotal(0, true)
+        destInfo, err := func() (types.BlobInfo, error) { // A scope for defer
+            progressPool, progressCleanup := c.newProgressPool(ctx)
+            defer progressCleanup()
+            bar := c.createProgressBar(progressPool, srcInfo, "config")
+            destInfo, err := c.copyBlobFromStream(ctx, bytes.NewReader(configBlob), srcInfo, nil, false, true, bar)
+            if err != nil {
+                return types.BlobInfo{}, err
+            }
+            bar.SetTotal(int64(len(configBlob)), true)
+            return destInfo, nil
+        }()
+        if err != nil {
+            return nil
         }
         if destInfo.Digest != srcInfo.Digest {
             return errors.Errorf("Internal error: copying uncompressed config blob %s changed digest to %s", srcInfo.Digest, destInfo.Digest)
@@ -650,7 +667,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, ba
         return types.BlobInfo{}, "", errors.Wrapf(err, "Error trying to reuse blob %s at destination", srcInfo.Digest)
     }
     if reused {
-        logrus.Debugf("Skipping blob %s (already present):", shortDigest(srcInfo.Digest))
+        logrus.Debugf("Skipping blob %s (already present):", srcInfo.Digest)
         return blobInfo, cachedDiffID, nil
     }
 }
@@ -752,8 +769,6 @@ func computeDiffID(stream io.Reader, decompressor compression.DecompressorFunc)
 func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo,
     getOriginalLayerCopyWriter func(decompressor compression.DecompressorFunc) io.Writer,
     canModifyBlob bool, isConfig bool, bar *mpb.Bar) (types.BlobInfo, error) {
-    c.progressWG.Add(1)
-    defer c.progressWG.Done()
     // The copying happens through a pipeline of connected io.Readers.
     // === Input: srcStream
 
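
Taken together, the copy.go changes replace the copier-wide progressPool/progressWG fields with per-operation pools: newProgressPool builds an mpb pool tied to a cancellable context, and the returned cleanup function cancels that context and waits for the pool to drain. A minimal sketch of how a caller inside the copy package would drive the new helpers (copyWithProgress and its arguments are hypothetical; only newProgressPool, createProgressBar, SetTotal and copyBlobFromStream come from the diff above):

package copy // sketch only: assumes it sits next to the vendored code above

import (
    "context"

    "github.com/containers/image/types"
    "github.com/vbauerster/mpb"
)

// copyWithProgress is a hypothetical caller illustrating the new pattern:
// one progress pool per operation, torn down by the returned cleanup function.
func copyWithProgress(ctx context.Context, c *copier, infos []types.BlobInfo) {
    pool, cleanup := c.newProgressPool(ctx)
    defer cleanup() // cancels the pool's context and waits for it to drain

    bars := make([]*mpb.Bar, len(infos))
    for i, info := range infos {
        // the bar's prefix is built from the blob digest inside createProgressBar
        bars[i] = c.createProgressBar(pool, info, "blob")
    }
    // ... copy each blob here, handing bars[i] to copyBlobFromStream ...
    for i, info := range infos {
        bars[i].SetTotal(info.Size, true) // mark the bar as complete
    }
}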

20 vendor/github.com/containers/image/pkg/blobinfocache/memory.go generated vendored
@@ -1,6 +1,7 @@
 package blobinfocache
 
 import (
+    "sync"
     "time"
 
     "github.com/containers/image/types"
@@ -17,6 +18,7 @@ type locationKey struct {
 
 // memoryCache implements an in-memory-only BlobInfoCache
 type memoryCache struct {
+    mutex                 *sync.Mutex // synchronizes concurrent accesses
     uncompressedDigests   map[digest.Digest]digest.Digest
     digestsByUncompressed map[digest.Digest]map[digest.Digest]struct{} // stores a set of digests for each uncompressed digest
     knownLocations        map[locationKey]map[types.BICLocationReference]time.Time // stores last known existence time for each location reference
@@ -28,6 +30,7 @@ type memoryCache struct {
 // Manual users of types.{ImageSource,ImageDestination} might also use this instead of a persistent cache.
 func NewMemoryCache() types.BlobInfoCache {
     return &memoryCache{
+        mutex:                 new(sync.Mutex),
         uncompressedDigests:   map[digest.Digest]digest.Digest{},
         digestsByUncompressed: map[digest.Digest]map[digest.Digest]struct{}{},
         knownLocations:        map[locationKey]map[types.BICLocationReference]time.Time{},
@@ -38,6 +41,15 @@ func NewMemoryCache() types.BlobInfoCache {
 // May return anyDigest if it is known to be uncompressed.
 // Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
 func (mem *memoryCache) UncompressedDigest(anyDigest digest.Digest) digest.Digest {
+    mem.mutex.Lock()
+    defer mem.mutex.Unlock()
+    return mem.uncompressedDigest(anyDigest)
+}
+
+// uncompressedDigest returns an uncompressed digest corresponding to anyDigest.
+// May return anyDigest if it is known to be uncompressed.
+// Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
+func (mem *memoryCache) uncompressedDigest(anyDigest digest.Digest) digest.Digest {
     if d, ok := mem.uncompressedDigests[anyDigest]; ok {
         return d
     }
@@ -56,6 +68,8 @@ func (mem *memoryCache) UncompressedDigest(anyDigest digest.Digest) digest.Diges
 // because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
 // (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
 func (mem *memoryCache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) {
+    mem.mutex.Lock()
+    defer mem.mutex.Unlock()
     if previous, ok := mem.uncompressedDigests[anyDigest]; ok && previous != uncompressed {
         logrus.Warnf("Uncompressed digest for blob %s previously recorded as %s, now %s", anyDigest, previous, uncompressed)
     }
@@ -72,6 +86,8 @@ func (mem *memoryCache) RecordDigestUncompressedPair(anyDigest digest.Digest, un
 // RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
 // and can be reused given the opaque location data.
 func (mem *memoryCache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) {
+    mem.mutex.Lock()
+    defer mem.mutex.Unlock()
     key := locationKey{transport: transport.Name(), scope: scope, blobDigest: blobDigest}
     locationScope, ok := mem.knownLocations[key]
     if !ok {
@@ -103,11 +119,13 @@ func (mem *memoryCache) appendReplacementCandidates(candidates []candidateWithTi
 // data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
 // uncompressed digest.
 func (mem *memoryCache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
+    mem.mutex.Lock()
+    defer mem.mutex.Unlock()
     res := []candidateWithTime{}
     res = mem.appendReplacementCandidates(res, transport, scope, primaryDigest)
     var uncompressedDigest digest.Digest // = ""
     if canSubstitute {
-        if uncompressedDigest = mem.UncompressedDigest(primaryDigest); uncompressedDigest != "" {
+        if uncompressedDigest = mem.uncompressedDigest(primaryDigest); uncompressedDigest != "" {
             otherDigests := mem.digestsByUncompressed[uncompressedDigest] // nil if not present in the map
             for d := range otherDigests {
                 if d != primaryDigest && d != uncompressedDigest {
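
The blobinfocache changes follow the usual Go pattern for making a map-backed type safe for concurrent use: every exported method takes mem.mutex, and internal reuse goes through an unexported helper that assumes the lock is already held (uncompressedDigest), so CandidateLocations no longer risks deadlocking by re-entering the exported UncompressedDigest under a non-reentrant sync.Mutex. A generic sketch of the same pattern, with made-up names:

package cachepattern

import "sync"

type cache struct {
    mu   sync.Mutex
    data map[string]string
}

// Get is the exported entry point: it owns the locking.
func (c *cache) Get(k string) string {
    c.mu.Lock()
    defer c.mu.Unlock()
    return c.get(k)
}

// get assumes c.mu is already held, so other locked methods can call it
// without deadlocking on the non-reentrant sync.Mutex.
func (c *cache) get(k string) string {
    return c.data[k]
}

// Lookup also holds the lock and reuses the unlocked helper internally,
// mirroring how CandidateLocations calls uncompressedDigest above.
func (c *cache) Lookup(keys []string) []string {
    c.mu.Lock()
    defer c.mu.Unlock()
    out := make([]string, 0, len(keys))
    for _, k := range keys {
        out = append(out, c.get(k))
    }
    return out
}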

2 vendor/github.com/containers/image/version/version.go generated vendored
@@ -11,7 +11,7 @@ const (
     VersionPatch = 5
 
     // VersionDev indicates development branch. Releases will be empty string.
-    VersionDev = "-dev"
+    VersionDev = ""
 )
 
 // Version is the specification version that the package types support.

4 vendor/github.com/containers/storage/images.go generated vendored
@@ -272,7 +272,7 @@ func (r *imageStore) Load() error {
             }
         }
     }
-    if shouldSave && !r.IsReadWrite() {
+    if shouldSave && (!r.IsReadWrite() || !r.Locked()) {
         return ErrDuplicateImageNames
     }
     r.images = images
@@ -291,7 +291,7 @@ func (r *imageStore) Save() error {
         return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify the image store at %q", r.imagespath())
     }
     if !r.Locked() {
-        return errors.New("image store is not locked")
+        return errors.New("image store is not locked for writing")
     }
     rpath := r.imagespath()
     if err := os.MkdirAll(filepath.Dir(rpath), 0700); err != nil {

166 vendor/github.com/containers/storage/layers.go generated vendored
@@ -229,6 +229,7 @@ type LayerStore interface {
 
 type layerStore struct {
     lockfile       Locker
+    mountsLockfile Locker
     rundir         string
     driver         drivers.Driver
     layerdir       string
@@ -291,7 +292,6 @@ func (r *layerStore) Load() error {
     idlist := []string{}
     ids := make(map[string]*Layer)
     names := make(map[string]*Layer)
-    mounts := make(map[string]*Layer)
     compressedsums := make(map[digest.Digest][]string)
     uncompressedsums := make(map[digest.Digest][]string)
     if r.lockfile.IsReadWrite() {
@@ -319,39 +319,29 @@ func (r *layerStore) Load() error {
                 label.ReserveLabel(layer.MountLabel)
             }
         }
+        err = nil
     }
-    if shouldSave && !r.IsReadWrite() {
+    if shouldSave && (!r.IsReadWrite() || !r.Locked()) {
         return ErrDuplicateLayerNames
     }
-    mpath := r.mountspath()
-    data, err = ioutil.ReadFile(mpath)
-    if err != nil && !os.IsNotExist(err) {
-        return err
-    }
-    layerMounts := []layerMountPoint{}
-    if err = json.Unmarshal(data, &layerMounts); len(data) == 0 || err == nil {
-        for _, mount := range layerMounts {
-            if mount.MountPoint != "" {
-                if layer, ok := ids[mount.ID]; ok {
-                    mounts[mount.MountPoint] = layer
-                    layer.MountPoint = mount.MountPoint
-                    layer.MountCount = mount.MountCount
-                }
-            }
-        }
-    }
     r.layers = layers
     r.idindex = truncindex.NewTruncIndex(idlist)
     r.byid = ids
     r.byname = names
-    r.bymount = mounts
     r.bycompressedsum = compressedsums
     r.byuncompressedsum = uncompressedsums
-    err = nil
+    // Load and merge information about which layers are mounted, and where.
+    if r.IsReadWrite() {
+        r.mountsLockfile.RLock()
+        defer r.mountsLockfile.Unlock()
+        if err = r.loadMounts(); err != nil {
+            return err
+        }
+    }
     // Last step: if we're writable, try to remove anything that a previous
     // user of this storage area marked for deletion but didn't manage to
     // actually delete.
-    if r.IsReadWrite() {
+    if r.IsReadWrite() && r.Locked() {
         for _, layer := range r.layers {
             if layer.Flags == nil {
                 layer.Flags = make(map[string]interface{})
@@ -373,12 +363,36 @@ func (r *layerStore) Load() error {
     return err
 }
 
+func (r *layerStore) loadMounts() error {
+    mounts := make(map[string]*Layer)
+    mpath := r.mountspath()
+    data, err := ioutil.ReadFile(mpath)
+    if err != nil && !os.IsNotExist(err) {
+        return err
+    }
+    layerMounts := []layerMountPoint{}
+    if err = json.Unmarshal(data, &layerMounts); len(data) == 0 || err == nil {
+        for _, mount := range layerMounts {
+            if mount.MountPoint != "" {
+                if layer, ok := r.lookup(mount.ID); ok {
+                    mounts[mount.MountPoint] = layer
+                    layer.MountPoint = mount.MountPoint
+                    layer.MountCount = mount.MountCount
+                }
+            }
+        }
+        err = nil
+    }
+    r.bymount = mounts
+    return err
+}
+
 func (r *layerStore) Save() error {
     if !r.IsReadWrite() {
         return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify the layer store at %q", r.layerspath())
     }
     if !r.Locked() {
-        return errors.New("layer store is not locked")
+        return errors.New("layer store is not locked for writing")
     }
     rpath := r.layerspath()
     if err := os.MkdirAll(filepath.Dir(rpath), 0700); err != nil {
@@ -388,6 +402,25 @@ func (r *layerStore) Save() error {
     if err != nil {
         return err
     }
+    if err := ioutils.AtomicWriteFile(rpath, jldata, 0600); err != nil {
+        return err
+    }
+    if !r.IsReadWrite() {
+        return nil
+    }
+    r.mountsLockfile.Lock()
+    defer r.mountsLockfile.Unlock()
+    defer r.mountsLockfile.Touch()
+    return r.saveMounts()
+}
+
+func (r *layerStore) saveMounts() error {
+    if !r.IsReadWrite() {
+        return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify the layer store at %q", r.layerspath())
+    }
+    if !r.mountsLockfile.Locked() {
+        return errors.New("layer store mount information is not locked for writing")
+    }
     mpath := r.mountspath()
     if err := os.MkdirAll(filepath.Dir(mpath), 0700); err != nil {
         return err
@@ -406,11 +439,10 @@ func (r *layerStore) Save() error {
     if err != nil {
         return err
     }
-    if err := ioutils.AtomicWriteFile(rpath, jldata, 0600); err != nil {
+    if err = ioutils.AtomicWriteFile(mpath, jmdata, 0600); err != nil {
         return err
     }
-    defer r.Touch()
-    return ioutils.AtomicWriteFile(mpath, jmdata, 0600)
+    return r.loadMounts()
 }
 
 func newLayerStore(rundir string, layerdir string, driver drivers.Driver, uidMap, gidMap []idtools.IDMap) (LayerStore, error) {
@@ -426,8 +458,13 @@ func newLayerStore(rundir string, layerdir string, driver drivers.Driver, uidMap
     }
     lockfile.Lock()
     defer lockfile.Unlock()
+    mountsLockfile, err := GetLockfile(filepath.Join(rundir, "mountpoints.lock"))
+    if err != nil {
+        return nil, err
+    }
     rlstore := layerStore{
         lockfile:       lockfile,
+        mountsLockfile: mountsLockfile,
         driver:         driver,
         rundir:         rundir,
         layerdir:       layerdir,
@@ -452,6 +489,7 @@ func newROLayerStore(rundir string, layerdir string, driver drivers.Driver) (ROL
     defer lockfile.Unlock()
     rlstore := layerStore{
         lockfile:       lockfile,
+        mountsLockfile: nil,
         driver:         driver,
         rundir:         rundir,
         layerdir:       layerdir,
@@ -673,6 +711,16 @@ func (r *layerStore) Create(id string, parent *Layer, names []string, mountLabel
 }
 
 func (r *layerStore) Mounted(id string) (int, error) {
+    if !r.IsReadWrite() {
+        return 0, errors.Wrapf(ErrStoreIsReadOnly, "no mount information for layers at %q", r.mountspath())
+    }
+    r.mountsLockfile.RLock()
+    defer r.mountsLockfile.Unlock()
+    if modified, err := r.mountsLockfile.Modified(); modified || err != nil {
+        if err = r.loadMounts(); err != nil {
+            return 0, err
+        }
+    }
     layer, ok := r.lookup(id)
     if !ok {
         return 0, ErrLayerUnknown
@@ -684,13 +732,21 @@ func (r *layerStore) Mount(id string, options drivers.MountOpts) (string, error)
     if !r.IsReadWrite() {
         return "", errors.Wrapf(ErrStoreIsReadOnly, "not allowed to update mount locations for layers at %q", r.mountspath())
     }
+    r.mountsLockfile.Lock()
+    defer r.mountsLockfile.Unlock()
+    if modified, err := r.mountsLockfile.Modified(); modified || err != nil {
+        if err = r.loadMounts(); err != nil {
+            return "", err
+        }
+    }
+    defer r.mountsLockfile.Touch()
     layer, ok := r.lookup(id)
     if !ok {
         return "", ErrLayerUnknown
     }
     if layer.MountCount > 0 {
         layer.MountCount++
-        return layer.MountPoint, r.Save()
+        return layer.MountPoint, r.saveMounts()
     }
     if options.MountLabel == "" {
         options.MountLabel = layer.MountLabel
@@ -709,7 +765,7 @@ func (r *layerStore) Mount(id string, options drivers.MountOpts) (string, error)
         layer.MountPoint = filepath.Clean(mountpoint)
         layer.MountCount++
         r.bymount[layer.MountPoint] = layer
-        err = r.Save()
+        err = r.saveMounts()
     }
     return mountpoint, err
 }
@@ -718,6 +774,14 @@ func (r *layerStore) Unmount(id string, force bool) (bool, error) {
     if !r.IsReadWrite() {
         return false, errors.Wrapf(ErrStoreIsReadOnly, "not allowed to update mount locations for layers at %q", r.mountspath())
     }
+    r.mountsLockfile.Lock()
+    defer r.mountsLockfile.Unlock()
+    if modified, err := r.mountsLockfile.Modified(); modified || err != nil {
+        if err = r.loadMounts(); err != nil {
+            return false, err
+        }
+    }
+    defer r.mountsLockfile.Touch()
     layer, ok := r.lookup(id)
     if !ok {
         layerByMount, ok := r.bymount[filepath.Clean(id)]
@@ -731,7 +795,7 @@ func (r *layerStore) Unmount(id string, force bool) (bool, error) {
     }
     if layer.MountCount > 1 {
         layer.MountCount--
-        return true, r.Save()
+        return true, r.saveMounts()
     }
     err := r.driver.Put(id)
     if err == nil || os.IsNotExist(err) {
@@ -740,12 +804,22 @@ func (r *layerStore) Unmount(id string, force bool) (bool, error) {
         }
         layer.MountCount--
         layer.MountPoint = ""
-        return false, r.Save()
+        return false, r.saveMounts()
     }
     return true, err
 }
 
 func (r *layerStore) ParentOwners(id string) (uids, gids []int, err error) {
+    if !r.IsReadWrite() {
+        return nil, nil, errors.Wrapf(ErrStoreIsReadOnly, "no mount information for layers at %q", r.mountspath())
+    }
+    r.mountsLockfile.RLock()
+    defer r.mountsLockfile.Unlock()
+    if modified, err := r.mountsLockfile.Modified(); modified || err != nil {
+        if err = r.loadMounts(); err != nil {
+            return nil, nil, err
+        }
+    }
     layer, ok := r.lookup(id)
     if !ok {
         return nil, nil, ErrLayerUnknown
@@ -862,14 +936,23 @@ func (r *layerStore) Delete(id string) error {
         return ErrLayerUnknown
     }
     id = layer.ID
-    // This check is needed for idempotency of delete where the layer could have been
-    // already unmounted (since c/storage gives you that API directly)
-    for layer.MountCount > 0 {
+    // The layer may already have been explicitly unmounted, but if not, we
+    // should try to clean that up before we start deleting anything at the
+    // driver level.
+    mountCount, err := r.Mounted(id)
+    if err != nil {
+        return errors.Wrapf(err, "error checking if layer %q is still mounted", id)
+    }
+    for mountCount > 0 {
         if _, err := r.Unmount(id, false); err != nil {
             return err
         }
+        mountCount, err = r.Mounted(id)
+        if err != nil {
+            return errors.Wrapf(err, "error checking if layer %q is still mounted", id)
+        }
     }
-    err := r.driver.Remove(id)
+    err = r.driver.Remove(id)
     if err == nil {
         os.Remove(r.tspath(id))
         delete(r.byid, id)
@@ -1235,7 +1318,20 @@ func (r *layerStore) Touch() error {
 }
 
 func (r *layerStore) Modified() (bool, error) {
-    return r.lockfile.Modified()
+    var mmodified bool
+    lmodified, err := r.lockfile.Modified()
+    if err != nil {
+        return lmodified, err
+    }
+    if r.IsReadWrite() {
+        r.mountsLockfile.RLock()
+        defer r.mountsLockfile.Unlock()
+        mmodified, err = r.mountsLockfile.Modified()
+        if err != nil {
+            return lmodified, err
+        }
+    }
+    return lmodified || mmodified, nil
 }
 
 func (r *layerStore) IsReadWrite() bool {
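
The layers.go changes move mount bookkeeping out of the main layers lock and into a dedicated mountpoints.lock plus the loadMounts/saveMounts pair: readers take the mounts lock shared and reload only when Modified() says another process wrote the file, while writers take it exclusively, persist with saveMounts, and Touch() the lock so other processes know to reload. A condensed sketch of the writer side of that pattern (updateMountState is hypothetical and would sit inside the vendored containers/storage package; the fields and helpers it uses are the ones introduced above):

// updateMountState is a hypothetical writer following the pattern Mount and
// Unmount use above: exclusive lock, reload if stale, mutate, persist, Touch.
func (r *layerStore) updateMountState(id string, delta int) error {
    if !r.IsReadWrite() {
        return ErrStoreIsReadOnly
    }
    r.mountsLockfile.Lock() // exclusive: one writer at a time
    defer r.mountsLockfile.Unlock()
    if modified, err := r.mountsLockfile.Modified(); modified || err != nil {
        if err = r.loadMounts(); err != nil { // pick up changes from other processes
            return err
        }
    }
    defer r.mountsLockfile.Touch() // record us as last writer so others reload
    layer, ok := r.lookup(id)
    if !ok {
        return ErrLayerUnknown
    }
    layer.MountCount += delta
    return r.saveMounts() // write mounts.json atomically and reload it
}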

7 vendor/github.com/containers/storage/lockfile.go generated vendored
@@ -35,7 +35,7 @@ type Locker interface {
     // IsReadWrite() checks if the lock file is read-write
     IsReadWrite() bool
 
-    // Locked() checks if lock is locked
+    // Locked() checks if lock is locked for writing by a thread in this process
     Locked() bool
 }
 
@@ -66,7 +66,10 @@ func getLockfile(path string, ro bool) (Locker, error) {
     if lockfiles == nil {
         lockfiles = make(map[string]Locker)
     }
-    cleanPath := filepath.Clean(path)
+    cleanPath, err := filepath.Abs(path)
+    if err != nil {
+        return nil, errors.Wrapf(err, "error ensuring that path %q is an absolute path", path)
+    }
     if locker, ok := lockfiles[cleanPath]; ok {
         if ro && locker.IsReadWrite() {
             return nil, errors.Errorf("lock %q is not a read-only lock", cleanPath)
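
The getLockfile change matters because lockfiles is a process-wide map keyed by path: with filepath.Clean, a relative and an absolute spelling of the same lock file would produce two different keys and therefore two independent Locker values, while filepath.Abs collapses them into one canonical key (and reports an error if the working directory cannot be resolved). A small stand-alone illustration, assuming a working directory of /tmp/store:

package main

import (
    "fmt"
    "os"
    "path/filepath"
)

func main() {
    _ = os.Chdir("/tmp/store") // assumed working directory for the example

    rel := "run/mountpoints.lock"
    cleaned := filepath.Clean(rel) // "run/mountpoints.lock" - still relative
    abs, err := filepath.Abs(rel)  // "/tmp/store/run/mountpoints.lock"
    if err != nil {
        panic(err)
    }
    // Keyed by Clean, the relative and absolute spellings would be two map
    // entries; keyed by Abs, both collapse to the single canonical path.
    fmt.Println(cleaned, abs)
}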

49 vendor/github.com/containers/storage/lockfile_unix.go generated vendored
@@ -32,7 +32,7 @@ func getLockFile(path string, ro bool) (Locker, error) {
     }
     return &lockfile{
         stateMutex: &sync.Mutex{},
-        writeMutex: &sync.Mutex{},
+        rwMutex:    &sync.RWMutex{},
         file:       path,
         fd:         uintptr(fd),
         lw:         stringid.GenerateRandomID(),
@@ -42,10 +42,10 @@ func getLockFile(path string, ro bool) (Locker, error) {
 }
 
 type lockfile struct {
-    // stateMutex is used to synchronize concurrent accesses
+    // rwMutex serializes concurrent reader-writer acquisitions in the same process space
+    rwMutex *sync.RWMutex
+    // stateMutex is used to synchronize concurrent accesses to the state below
     stateMutex *sync.Mutex
-    // writeMutex is used to serialize and avoid recursive writer locks
-    writeMutex *sync.Mutex
     counter    int64
     file       string
     fd         uintptr
@@ -65,23 +65,24 @@ func (l *lockfile) lock(l_type int16) {
         Len:    0,
         Pid:    int32(os.Getpid()),
     }
-    if l_type == unix.F_WRLCK {
-        // If we try to lock as a writer, lock the writerMutex first to
-        // avoid multiple writer acquisitions of the same process.
-        // Note: it's important to lock it prior to the stateMutex to
-        // avoid a deadlock.
-        l.writeMutex.Lock()
+    switch l_type {
+    case unix.F_RDLCK:
+        l.rwMutex.RLock()
+    case unix.F_WRLCK:
+        l.rwMutex.Lock()
+    default:
+        panic(fmt.Sprintf("attempted to acquire a file lock of unrecognized type %d", l_type))
     }
     l.stateMutex.Lock()
-    l.locktype = l_type
     if l.counter == 0 {
         // Optimization: only use the (expensive) fcntl syscall when
-        // the counter is 0. If it's greater than that, we're owning
-        // the lock already and can only be a reader.
+        // the counter is 0. In this case, we're either the first
+        // reader lock or a writer lock.
         for unix.FcntlFlock(l.fd, unix.F_SETLKW, &lk) != nil {
             time.Sleep(10 * time.Millisecond)
         }
     }
+    l.locktype = l_type
     l.locked = true
     l.counter++
     l.stateMutex.Unlock()
@@ -133,19 +134,28 @@ func (l *lockfile) Unlock() {
             time.Sleep(10 * time.Millisecond)
         }
     }
-    if l.locktype == unix.F_WRLCK {
-        l.writeMutex.Unlock()
+    if l.locktype == unix.F_RDLCK {
+        l.rwMutex.RUnlock()
+    } else {
+        l.rwMutex.Unlock()
     }
     l.stateMutex.Unlock()
 }
 
-// Locked checks if lockfile is locked.
+// Locked checks if lockfile is locked for writing by a thread in this process.
 func (l *lockfile) Locked() bool {
-    return l.locked
+    l.stateMutex.Lock()
+    defer l.stateMutex.Unlock()
+    return l.locked && (l.locktype == unix.F_WRLCK)
 }
 
 // Touch updates the lock file with the UID of the user.
 func (l *lockfile) Touch() error {
+    l.stateMutex.Lock()
+    if !l.locked || (l.locktype != unix.F_WRLCK) {
+        panic("attempted to update last-writer in lockfile without the write lock")
+    }
+    l.stateMutex.Unlock()
     l.lw = stringid.GenerateRandomID()
     id := []byte(l.lw)
     _, err := unix.Seek(int(l.fd), 0, os.SEEK_SET)
@@ -170,6 +180,11 @@ func (l *lockfile) Touch() error {
 // was loaded.
 func (l *lockfile) Modified() (bool, error) {
     id := []byte(l.lw)
+    l.stateMutex.Lock()
+    if !l.locked {
+        panic("attempted to check last-writer in lockfile without locking it first")
+    }
+    l.stateMutex.Unlock()
     _, err := unix.Seek(int(l.fd), 0, os.SEEK_SET)
     if err != nil {
         return true, err
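
Swapping writeMutex for rwMutex changes the in-process behaviour of the lock: a read acquisition (F_RDLCK) now takes rwMutex.RLock, so several readers in one process no longer serialize, while a write acquisition (F_WRLCK) takes rwMutex.Lock and excludes everyone, mirroring the fcntl lock that already provides cross-process exclusion; Locked() and Touch() now also insist on the write lock. A hypothetical caller, assuming the Locker returned by GetLockfile exposes the RLock/Lock/Unlock/Modified/Touch methods used in the hunks above:

package lockexample

import "github.com/containers/storage"

// illustrateLocking is a made-up example; the lock path is arbitrary.
func illustrateLocking() error {
    lock, err := storage.GetLockfile("/var/lib/containers/storage/storage.lock")
    if err != nil {
        return err
    }

    // Shared acquisition: other readers in this process are not blocked,
    // and Locked() answers false because no write lock is held.
    lock.RLock()
    if modified, err := lock.Modified(); modified || err != nil {
        // a writer elsewhere touched the file; callers reload their state here
    }
    lock.Unlock()

    // Exclusive acquisition: rwMutex.Lock() shuts out readers and writers in
    // this process, and the fcntl F_WRLCK shuts out other processes.
    lock.Lock()
    defer lock.Unlock()
    return lock.Touch() // records this process as the last writer
}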

98 vendor/github.com/containers/storage/store.go generated vendored
@@ -550,10 +550,18 @@ func GetStore(options StoreOptions) (Store, error) {
     }
 
     if options.GraphRoot != "" {
-        options.GraphRoot = filepath.Clean(options.GraphRoot)
+        dir, err := filepath.Abs(options.GraphRoot)
+        if err != nil {
+            return nil, errors.Wrapf(err, "error deriving an absolute path from %q", options.GraphRoot)
+        }
+        options.GraphRoot = dir
     }
     if options.RunRoot != "" {
-        options.RunRoot = filepath.Clean(options.RunRoot)
+        dir, err := filepath.Abs(options.RunRoot)
+        if err != nil {
+            return nil, errors.Wrapf(err, "error deriving an absolute path from %q", options.RunRoot)
+        }
+        options.RunRoot = dir
     }
 
     storesLock.Lock()
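
Every store.go hunk from here on makes the same mechanical change: read-only accessors (Metadata, ListImageBigData, Exists, Names, Lookup, the size and diff queries, and so on) now take each store's lock shared via RLock() instead of exclusively via Lock(), while keeping the reload-if-modified step. The repeated shape, written out once as a sketch (readOnlyQuery is hypothetical and would sit inside the vendored containers/storage package):

// readOnlyQuery shows the shape every converted accessor now has; s stands in
// for any of the ROLayerStore/ROImageStore/container stores iterated below.
func readOnlyQuery(s ROLayerStore, id string) (bool, error) {
    s.RLock() // shared: concurrent readers no longer serialize each other
    defer s.Unlock()
    if modified, err := s.Modified(); modified || err != nil {
        if err = s.Load(); err != nil { // refresh in-memory state from disk
            return false, err
        }
    }
    return s.Exists(id), nil
}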
@@ -1321,7 +1329,7 @@ func (s *store) Metadata(id string) (string, error) {
|
|||||||
}
|
}
|
||||||
for _, s := range append([]ROLayerStore{lstore}, lstores...) {
|
for _, s := range append([]ROLayerStore{lstore}, lstores...) {
|
||||||
store := s
|
store := s
|
||||||
store.Lock()
|
store.RLock()
|
||||||
defer store.Unlock()
|
defer store.Unlock()
|
||||||
if modified, err := store.Modified(); modified || err != nil {
|
if modified, err := store.Modified(); modified || err != nil {
|
||||||
if err = store.Load(); err != nil {
|
if err = store.Load(); err != nil {
|
||||||
@@ -1343,7 +1351,7 @@ func (s *store) Metadata(id string) (string, error) {
|
|||||||
}
|
}
|
||||||
for _, s := range append([]ROImageStore{istore}, istores...) {
|
for _, s := range append([]ROImageStore{istore}, istores...) {
|
||||||
store := s
|
store := s
|
||||||
store.Lock()
|
store.RLock()
|
||||||
defer store.Unlock()
|
defer store.Unlock()
|
||||||
if modified, err := store.Modified(); modified || err != nil {
|
if modified, err := store.Modified(); modified || err != nil {
|
||||||
if err = store.Load(); err != nil {
|
if err = store.Load(); err != nil {
|
||||||
@@ -1359,7 +1367,7 @@ func (s *store) Metadata(id string) (string, error) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
cstore.Lock()
|
cstore.RLock()
|
||||||
defer cstore.Unlock()
|
defer cstore.Unlock()
|
||||||
if modified, err := cstore.Modified(); modified || err != nil {
|
if modified, err := cstore.Modified(); modified || err != nil {
|
||||||
if err = cstore.Load(); err != nil {
|
if err = cstore.Load(); err != nil {
|
||||||
@@ -1383,7 +1391,7 @@ func (s *store) ListImageBigData(id string) ([]string, error) {
|
|||||||
}
|
}
|
||||||
for _, s := range append([]ROImageStore{istore}, istores...) {
|
for _, s := range append([]ROImageStore{istore}, istores...) {
|
||||||
store := s
|
store := s
|
||||||
store.Lock()
|
store.RLock()
|
||||||
defer store.Unlock()
|
defer store.Unlock()
|
||||||
if modified, err := store.Modified(); modified || err != nil {
|
if modified, err := store.Modified(); modified || err != nil {
|
||||||
if err = store.Load(); err != nil {
|
if err = store.Load(); err != nil {
|
||||||
@@ -1409,7 +1417,7 @@ func (s *store) ImageBigDataSize(id, key string) (int64, error) {
|
|||||||
}
|
}
|
||||||
for _, s := range append([]ROImageStore{istore}, istores...) {
|
for _, s := range append([]ROImageStore{istore}, istores...) {
|
||||||
store := s
|
store := s
|
||||||
store.Lock()
|
store.RLock()
|
||||||
defer store.Unlock()
|
defer store.Unlock()
|
||||||
if modified, err := store.Modified(); modified || err != nil {
|
if modified, err := store.Modified(); modified || err != nil {
|
||||||
if err = store.Load(); err != nil {
|
if err = store.Load(); err != nil {
|
||||||
@@ -1436,7 +1444,7 @@ func (s *store) ImageBigDataDigest(id, key string) (digest.Digest, error) {
|
|||||||
stores = append([]ROImageStore{ristore}, stores...)
|
stores = append([]ROImageStore{ristore}, stores...)
|
||||||
for _, r := range stores {
|
for _, r := range stores {
|
||||||
ristore := r
|
ristore := r
|
||||||
ristore.Lock()
|
ristore.RLock()
|
||||||
defer ristore.Unlock()
|
defer ristore.Unlock()
|
||||||
if modified, err := ristore.Modified(); modified || err != nil {
|
if modified, err := ristore.Modified(); modified || err != nil {
|
||||||
if err = ristore.Load(); err != nil {
|
if err = ristore.Load(); err != nil {
|
||||||
@@ -1507,7 +1515,7 @@ func (s *store) ImageSize(id string) (int64, error) {
|
|||||||
}
|
}
|
||||||
for _, s := range append([]ROLayerStore{lstore}, lstores...) {
|
for _, s := range append([]ROLayerStore{lstore}, lstores...) {
|
||||||
store := s
|
store := s
|
||||||
store.Lock()
|
store.RLock()
|
||||||
defer store.Unlock()
|
defer store.Unlock()
|
||||||
if modified, err := store.Modified(); modified || err != nil {
|
if modified, err := store.Modified(); modified || err != nil {
|
||||||
if err = store.Load(); err != nil {
|
if err = store.Load(); err != nil {
|
||||||
@@ -1529,7 +1537,7 @@ func (s *store) ImageSize(id string) (int64, error) {
|
|||||||
// Look for the image's record.
|
// Look for the image's record.
|
||||||
for _, s := range append([]ROImageStore{istore}, istores...) {
|
for _, s := range append([]ROImageStore{istore}, istores...) {
|
||||||
store := s
|
store := s
|
||||||
store.Lock()
|
store.RLock()
|
||||||
defer store.Unlock()
|
defer store.Unlock()
|
||||||
if modified, err := store.Modified(); modified || err != nil {
|
if modified, err := store.Modified(); modified || err != nil {
|
||||||
if err = store.Load(); err != nil {
|
if err = store.Load(); err != nil {
|
||||||
@@ -1617,7 +1625,7 @@ func (s *store) ContainerSize(id string) (int64, error) {
|
|||||||
}
|
}
|
||||||
for _, s := range append([]ROLayerStore{lstore}, lstores...) {
|
for _, s := range append([]ROLayerStore{lstore}, lstores...) {
|
||||||
store := s
|
store := s
|
||||||
store.Lock()
|
store.RLock()
|
||||||
defer store.Unlock()
|
defer store.Unlock()
|
||||||
if modified, err := store.Modified(); modified || err != nil {
|
if modified, err := store.Modified(); modified || err != nil {
|
||||||
if err = store.Load(); err != nil {
|
if err = store.Load(); err != nil {
|
||||||
@@ -1641,7 +1649,7 @@ func (s *store) ContainerSize(id string) (int64, error) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return -1, err
|
return -1, err
|
||||||
}
|
}
|
||||||
rcstore.Lock()
|
rcstore.RLock()
|
||||||
defer rcstore.Unlock()
|
defer rcstore.Unlock()
|
||||||
if modified, err := rcstore.Modified(); modified || err != nil {
|
if modified, err := rcstore.Modified(); modified || err != nil {
|
||||||
if err = rcstore.Load(); err != nil {
|
if err = rcstore.Load(); err != nil {
|
||||||
@@ -1705,7 +1713,7 @@ func (s *store) ListContainerBigData(id string) ([]string, error) {
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
rcstore.Lock()
|
rcstore.RLock()
|
||||||
defer rcstore.Unlock()
|
defer rcstore.Unlock()
|
||||||
if modified, err := rcstore.Modified(); modified || err != nil {
|
if modified, err := rcstore.Modified(); modified || err != nil {
|
||||||
if err = rcstore.Load(); err != nil {
|
if err = rcstore.Load(); err != nil {
|
||||||
@@ -1721,7 +1729,7 @@ func (s *store) ContainerBigDataSize(id, key string) (int64, error) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return -1, err
|
return -1, err
|
||||||
}
|
}
|
||||||
rcstore.Lock()
|
rcstore.RLock()
|
||||||
defer rcstore.Unlock()
|
defer rcstore.Unlock()
|
||||||
if modified, err := rcstore.Modified(); modified || err != nil {
|
if modified, err := rcstore.Modified(); modified || err != nil {
|
||||||
if err = rcstore.Load(); err != nil {
|
if err = rcstore.Load(); err != nil {
|
||||||
@@ -1736,7 +1744,7 @@ func (s *store) ContainerBigDataDigest(id, key string) (digest.Digest, error) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
rcstore.Lock()
|
rcstore.RLock()
|
||||||
defer rcstore.Unlock()
|
defer rcstore.Unlock()
|
||||||
if modified, err := rcstore.Modified(); modified || err != nil {
|
if modified, err := rcstore.Modified(); modified || err != nil {
|
||||||
if err = rcstore.Load(); err != nil {
|
if err = rcstore.Load(); err != nil {
|
||||||
@@ -1751,7 +1759,7 @@ func (s *store) ContainerBigData(id, key string) ([]byte, error) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
rcstore.Lock()
|
rcstore.RLock()
|
||||||
defer rcstore.Unlock()
|
defer rcstore.Unlock()
|
||||||
if modified, err := rcstore.Modified(); modified || err != nil {
|
if modified, err := rcstore.Modified(); modified || err != nil {
|
||||||
if err = rcstore.Load(); err != nil {
|
if err = rcstore.Load(); err != nil {
|
||||||
@@ -1787,7 +1795,7 @@ func (s *store) Exists(id string) bool {
|
|||||||
}
|
}
|
||||||
for _, s := range append([]ROLayerStore{lstore}, lstores...) {
|
for _, s := range append([]ROLayerStore{lstore}, lstores...) {
|
||||||
store := s
|
store := s
|
||||||
store.Lock()
|
store.RLock()
|
||||||
defer store.Unlock()
|
defer store.Unlock()
|
||||||
if modified, err := store.Modified(); modified || err != nil {
|
if modified, err := store.Modified(); modified || err != nil {
|
||||||
if err = store.Load(); err != nil {
|
if err = store.Load(); err != nil {
|
||||||
@@ -1809,7 +1817,7 @@ func (s *store) Exists(id string) bool {
 	}
 	for _, s := range append([]ROImageStore{istore}, istores...) {
 		store := s
-		store.Lock()
+		store.RLock()
 		defer store.Unlock()
 		if modified, err := store.Modified(); modified || err != nil {
 			if err = store.Load(); err != nil {
@@ -1825,7 +1833,7 @@ func (s *store) Exists(id string) bool {
 	if err != nil {
 		return false
 	}
-	rcstore.Lock()
+	rcstore.RLock()
 	defer rcstore.Unlock()
 	if modified, err := rcstore.Modified(); modified || err != nil {
 		if err = rcstore.Load(); err != nil {
@@ -1912,7 +1920,7 @@ func (s *store) Names(id string) ([]string, error) {
 	}
 	for _, s := range append([]ROLayerStore{lstore}, lstores...) {
 		store := s
-		store.Lock()
+		store.RLock()
 		defer store.Unlock()
 		if modified, err := store.Modified(); modified || err != nil {
 			if err = store.Load(); err != nil {
@@ -1934,7 +1942,7 @@ func (s *store) Names(id string) ([]string, error) {
 	}
 	for _, s := range append([]ROImageStore{istore}, istores...) {
 		store := s
-		store.Lock()
+		store.RLock()
 		defer store.Unlock()
 		if modified, err := store.Modified(); modified || err != nil {
 			if err = store.Load(); err != nil {
@@ -1950,7 +1958,7 @@ func (s *store) Names(id string) ([]string, error) {
 	if err != nil {
 		return nil, err
 	}
-	rcstore.Lock()
+	rcstore.RLock()
 	defer rcstore.Unlock()
 	if modified, err := rcstore.Modified(); modified || err != nil {
 		if err = rcstore.Load(); err != nil {
@@ -1974,7 +1982,7 @@ func (s *store) Lookup(name string) (string, error) {
 	}
 	for _, s := range append([]ROLayerStore{lstore}, lstores...) {
 		store := s
-		store.Lock()
+		store.RLock()
 		defer store.Unlock()
 		if modified, err := store.Modified(); modified || err != nil {
 			if err = store.Load(); err != nil {
@@ -1996,7 +2004,7 @@ func (s *store) Lookup(name string) (string, error) {
 	}
 	for _, s := range append([]ROImageStore{istore}, istores...) {
 		store := s
-		store.Lock()
+		store.RLock()
 		defer store.Unlock()
 		if modified, err := store.Modified(); modified || err != nil {
 			if err = store.Load(); err != nil {
@@ -2012,7 +2020,7 @@ func (s *store) Lookup(name string) (string, error) {
 	if err != nil {
 		return "", err
 	}
-	cstore.Lock()
+	cstore.RLock()
 	defer cstore.Unlock()
 	if modified, err := cstore.Modified(); modified || err != nil {
 		if err = cstore.Load(); err != nil {
@@ -2464,7 +2472,7 @@ func (s *store) Mounted(id string) (int, error) {
 	if err != nil {
 		return 0, err
 	}
-	rlstore.Lock()
+	rlstore.RLock()
 	defer rlstore.Unlock()
 	if modified, err := rlstore.Modified(); modified || err != nil {
 		if err = rlstore.Load(); err != nil {
@@ -2507,7 +2515,7 @@ func (s *store) Changes(from, to string) ([]archive.Change, error) {
 	}
 	for _, s := range append([]ROLayerStore{lstore}, lstores...) {
 		store := s
-		store.Lock()
+		store.RLock()
 		defer store.Unlock()
 		if modified, err := store.Modified(); modified || err != nil {
 			if err = store.Load(); err != nil {
@@ -2532,7 +2540,7 @@ func (s *store) DiffSize(from, to string) (int64, error) {
 	}
 	for _, s := range append([]ROLayerStore{lstore}, lstores...) {
 		store := s
-		store.Lock()
+		store.RLock()
 		defer store.Unlock()
 		if modified, err := store.Modified(); modified || err != nil {
 			if err = store.Load(); err != nil {
@@ -2612,7 +2620,7 @@ func (s *store) layersByMappedDigest(m func(ROLayerStore, digest.Digest) ([]Laye
 	}
 	for _, s := range append([]ROLayerStore{lstore}, lstores...) {
 		store := s
-		store.Lock()
+		store.RLock()
 		defer store.Unlock()
 		if modified, err := store.Modified(); modified || err != nil {
 			if err = store.Load(); err != nil {
@@ -2659,7 +2667,7 @@ func (s *store) LayerSize(id string) (int64, error) {
 	}
 	for _, s := range append([]ROLayerStore{lstore}, lstores...) {
 		store := s
-		store.Lock()
+		store.RLock()
 		defer store.Unlock()
 		if modified, err := store.Modified(); modified || err != nil {
 			if err = store.Load(); err != nil {
@@ -2678,7 +2686,7 @@ func (s *store) LayerParentOwners(id string) ([]int, []int, error) {
 	if err != nil {
 		return nil, nil, err
 	}
-	rlstore.Lock()
+	rlstore.RLock()
 	defer rlstore.Unlock()
 	if modified, err := rlstore.Modified(); modified || err != nil {
 		if err = rlstore.Load(); err != nil {
@@ -2700,14 +2708,14 @@ func (s *store) ContainerParentOwners(id string) ([]int, []int, error) {
 	if err != nil {
 		return nil, nil, err
 	}
-	rlstore.Lock()
+	rlstore.RLock()
 	defer rlstore.Unlock()
 	if modified, err := rlstore.Modified(); modified || err != nil {
 		if err = rlstore.Load(); err != nil {
 			return nil, nil, err
 		}
 	}
-	rcstore.Lock()
+	rcstore.RLock()
 	defer rcstore.Unlock()
 	if modified, err := rcstore.Modified(); modified || err != nil {
 		if err = rcstore.Load(); err != nil {
@@ -2738,7 +2746,7 @@ func (s *store) Layers() ([]Layer, error) {
 
 	for _, s := range append([]ROLayerStore{lstore}, lstores...) {
 		store := s
-		store.Lock()
+		store.RLock()
 		defer store.Unlock()
 		if modified, err := store.Modified(); modified || err != nil {
 			if err = store.Load(); err != nil {
@@ -2767,7 +2775,7 @@ func (s *store) Images() ([]Image, error) {
 	}
 	for _, s := range append([]ROImageStore{istore}, istores...) {
 		store := s
-		store.Lock()
+		store.RLock()
 		defer store.Unlock()
 		if modified, err := store.Modified(); modified || err != nil {
 			if err = store.Load(); err != nil {
@@ -2789,7 +2797,7 @@ func (s *store) Containers() ([]Container, error) {
 		return nil, err
 	}
 
-	rcstore.Lock()
+	rcstore.RLock()
 	defer rcstore.Unlock()
 	if modified, err := rcstore.Modified(); modified || err != nil {
 		if err = rcstore.Load(); err != nil {
@@ -2811,7 +2819,7 @@ func (s *store) Layer(id string) (*Layer, error) {
 	}
 	for _, s := range append([]ROLayerStore{lstore}, lstores...) {
 		store := s
-		store.Lock()
+		store.RLock()
 		defer store.Unlock()
 		if modified, err := store.Modified(); modified || err != nil {
 			if err = store.Load(); err != nil {
@@ -2837,7 +2845,7 @@ func (s *store) Image(id string) (*Image, error) {
 	}
 	for _, s := range append([]ROImageStore{istore}, istores...) {
 		store := s
-		store.Lock()
+		store.RLock()
 		defer store.Unlock()
 		if modified, err := store.Modified(); modified || err != nil {
 			if err = store.Load(); err != nil {
@@ -2870,7 +2878,7 @@ func (s *store) ImagesByTopLayer(id string) ([]*Image, error) {
 	}
 	for _, s := range append([]ROImageStore{istore}, istores...) {
 		store := s
-		store.Lock()
+		store.RLock()
 		defer store.Unlock()
 		if modified, err := store.Modified(); modified || err != nil {
 			if err = store.Load(); err != nil {
@@ -2903,7 +2911,7 @@ func (s *store) ImagesByDigest(d digest.Digest) ([]*Image, error) {
 		return nil, err
 	}
 	for _, store := range append([]ROImageStore{istore}, istores...) {
-		store.Lock()
+		store.RLock()
 		defer store.Unlock()
 		if modified, err := store.Modified(); modified || err != nil {
 			if err = store.Load(); err != nil {
@@ -2924,7 +2932,7 @@ func (s *store) Container(id string) (*Container, error) {
 	if err != nil {
 		return nil, err
 	}
-	rcstore.Lock()
+	rcstore.RLock()
 	defer rcstore.Unlock()
 	if modified, err := rcstore.Modified(); modified || err != nil {
 		if err = rcstore.Load(); err != nil {
@@ -2940,7 +2948,7 @@ func (s *store) ContainerLayerID(id string) (string, error) {
 	if err != nil {
 		return "", err
 	}
-	rcstore.Lock()
+	rcstore.RLock()
 	defer rcstore.Unlock()
 	if modified, err := rcstore.Modified(); modified || err != nil {
 		if err = rcstore.Load(); err != nil {
@@ -2963,7 +2971,7 @@ func (s *store) ContainerByLayer(id string) (*Container, error) {
 	if err != nil {
 		return nil, err
 	}
-	rcstore.Lock()
+	rcstore.RLock()
 	defer rcstore.Unlock()
 	if modified, err := rcstore.Modified(); modified || err != nil {
 		if err = rcstore.Load(); err != nil {
@@ -2988,7 +2996,7 @@ func (s *store) ContainerDirectory(id string) (string, error) {
 	if err != nil {
 		return "", err
 	}
-	rcstore.Lock()
+	rcstore.RLock()
 	defer rcstore.Unlock()
 	if modified, err := rcstore.Modified(); modified || err != nil {
 		if err = rcstore.Load(); err != nil {
@@ -3015,7 +3023,7 @@ func (s *store) ContainerRunDirectory(id string) (string, error) {
 		return "", err
 	}
 
-	rcstore.Lock()
+	rcstore.RLock()
 	defer rcstore.Unlock()
 	if modified, err := rcstore.Modified(); modified || err != nil {
 		if err = rcstore.Load(); err != nil {
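Taken together, these hunks make one systematic change: store accessors that only read state (Exists, Names, Lookup, Layers, Images, Containers, the lookup-by-id and by-digest helpers, and the directory getters) now acquire the store lock with RLock() instead of an exclusive Lock(), so concurrent readers no longer serialize behind one another; the deferred Unlock() calls are left unchanged in the diff. As a rough illustration of the same reader/writer split, here is a toy sketch using a plain sync.RWMutex (registry, Exists, and Add are hypothetical names invented for this sketch, not containers/storage APIs, and RLock is paired with RUnlock because that is what sync.RWMutex requires):

package main

import (
	"fmt"
	"sync"
)

// registry is a toy read-mostly store: many goroutines query it, few update it.
type registry struct {
	mu    sync.RWMutex
	items map[string]bool
}

// Exists is a read-only accessor, so it takes the read lock; concurrent
// readers can proceed in parallel instead of queuing behind an exclusive lock.
func (r *registry) Exists(id string) bool {
	r.mu.RLock()
	defer r.mu.RUnlock()
	return r.items[id]
}

// Add mutates the map, so it still needs the exclusive write lock.
func (r *registry) Add(id string) {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.items[id] = true
}

func main() {
	r := &registry{items: map[string]bool{}}
	r.Add("layer-abc")
	fmt.Println(r.Exists("layer-abc"), r.Exists("missing"))
}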