Vendor in latest containers/storage and containers/image

Update containers/storage and containers/image to define the location of local storage.

Signed-off-by: Daniel J Walsh <dwalsh@redhat.com>
Daniel J Walsh 2019-03-21 07:55:02 -04:00
parent d93a581fb8
commit 032309941b
20 changed files with 385 additions and 100 deletions


@@ -2,13 +2,13 @@
github.com/urfave/cli v1.20.0
github.com/kr/pretty v0.1.0
github.com/kr/text v0.1.0
-github.com/containers/image 4153c049af593f5abeab913595b288d109c6f2a7
+github.com/containers/image f52cf78ebfa1916da406f8b6210d8f7764ec1185
github.com/vbauerster/mpb v3.3.4
github.com/mattn/go-isatty v0.0.4
github.com/VividCortex/ewma v1.1.1
golang.org/x/sync 42b317875d0fa942474b76e1b46a6060d720ae6e
github.com/opencontainers/go-digest c9281466c8b2f606084ac71339773efd177436e7
-github.com/containers/storage 3c88d700b4daf494818633be242f02ccaccc6e72
+github.com/containers/storage v1.12.1
github.com/sirupsen/logrus v1.0.0
github.com/go-check/check v1
github.com/stretchr/testify v1.1.3


@@ -197,7 +197,7 @@ func dockerCertDir(sys *types.SystemContext, hostPort string) (string, error) {
// “write” specifies whether the client will be used for "write" access (in particular passed to lookaside.go:toplevelFromSection)
func newDockerClientFromRef(sys *types.SystemContext, ref dockerReference, write bool, actions string) (*dockerClient, error) {
registry := reference.Domain(ref.ref)
-username, password, err := config.GetAuthentication(sys, reference.Domain(ref.ref))
+username, password, err := config.GetAuthentication(sys, registry)
if err != nil {
return nil, errors.Wrapf(err, "error getting username and password")
}


@@ -16,7 +16,7 @@ import (
"github.com/containers/image/docker/reference"
"github.com/containers/image/manifest"
-"github.com/containers/image/pkg/blobinfocache"
+"github.com/containers/image/pkg/blobinfocache/none"
"github.com/containers/image/types"
"github.com/docker/distribution/registry/api/errcode"
"github.com/docker/distribution/registry/api/v2"
@@ -129,7 +129,7 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader,
// This should not really be necessary, at least the copy code calls TryReusingBlob automatically.
// Still, we need to check, if only because the "initiate upload" endpoint does not have a documented "blob already exists" return value.
// But we do that with NoCache, so that it _only_ checks the primary destination, instead of trying all mount candidates _again_.
-haveBlob, reusedInfo, err := d.TryReusingBlob(ctx, inputInfo, blobinfocache.NoCache, false)
+haveBlob, reusedInfo, err := d.TryReusingBlob(ctx, inputInfo, none.NoCache, false)
if err != nil {
return types.BlobInfo{}, err
}


@@ -11,7 +11,7 @@ import (
"github.com/containers/image/docker/reference"
"github.com/containers/image/manifest"
-"github.com/containers/image/pkg/blobinfocache"
+"github.com/containers/image/pkg/blobinfocache/none"
"github.com/containers/image/types"
"github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
@@ -96,7 +96,7 @@ func (m *manifestSchema2) ConfigBlob(ctx context.Context) ([]byte, error) {
if m.src == nil {
return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestSchema2")
}
-stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromSchema2Descriptor(m.m.ConfigDescriptor), blobinfocache.NoCache)
+stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromSchema2Descriptor(m.m.ConfigDescriptor), none.NoCache)
if err != nil {
return nil, err
}
@@ -252,7 +252,7 @@ func (m *manifestSchema2) convertToManifestSchema1(ctx context.Context, dest typ
logrus.Debugf("Uploading empty layer during conversion to schema 1")
// Ideally we should update the relevant BlobInfoCache about this layer, but that would require passing it down here,
// and anyway this blob is so small that it's easier to just copy it than to worry about figuring out another location where to get it.
-info, err := dest.PutBlob(ctx, bytes.NewReader(GzippedEmptyLayer), types.BlobInfo{Digest: GzippedEmptyLayerDigest, Size: int64(len(GzippedEmptyLayer))}, blobinfocache.NoCache, false)
+info, err := dest.PutBlob(ctx, bytes.NewReader(GzippedEmptyLayer), types.BlobInfo{Digest: GzippedEmptyLayerDigest, Size: int64(len(GzippedEmptyLayer))}, none.NoCache, false)
if err != nil {
return nil, errors.Wrap(err, "Error uploading empty layer")
}


@@ -7,7 +7,7 @@ import (
"github.com/containers/image/docker/reference"
"github.com/containers/image/manifest"
-"github.com/containers/image/pkg/blobinfocache"
+"github.com/containers/image/pkg/blobinfocache/none"
"github.com/containers/image/types"
"github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
@@ -61,7 +61,7 @@ func (m *manifestOCI1) ConfigBlob(ctx context.Context) ([]byte, error) {
if m.src == nil {
return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestOCI1")
}
-stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromOCI1Descriptor(m.m.Config), blobinfocache.NoCache)
+stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromOCI1Descriptor(m.m.Config), none.NoCache)
if err != nil {
return nil, err
}


@@ -1,4 +1,5 @@
-package blobinfocache
+// Package boltdb implements a BlobInfoCache backed by BoltDB.
+package boltdb
import (
"fmt"
@@ -7,6 +8,7 @@ import (
"time"
"github.com/boltdb/bolt"
+"github.com/containers/image/pkg/blobinfocache/internal/prioritize"
"github.com/containers/image/types"
"github.com/opencontainers/go-digest"
"github.com/sirupsen/logrus"
@@ -81,22 +83,23 @@ func unlockPath(path string) {
}
}
-// boltDBCache si a BlobInfoCache implementation which uses a BoltDB file at the specified path.
+// cache is a BlobInfoCache implementation which uses a BoltDB file at the specified path.
//
// Note that we don't keep the database open across operations, because that would lock the file and block any other
// users; instead, we need to open/close it for every single write or lookup.
-type boltDBCache struct {
+type cache struct {
path string
}
-// NewBoltDBCache returns a BlobInfoCache implementation which uses a BoltDB file at path.
-// Most users should call DefaultCache instead.
-func NewBoltDBCache(path string) types.BlobInfoCache {
-return &boltDBCache{path: path}
+// New returns a BlobInfoCache implementation which uses a BoltDB file at path.
+//
+// Most users should call blobinfocache.DefaultCache instead.
+func New(path string) types.BlobInfoCache {
+return &cache{path: path}
}
// view runs the specified fn within a read-only transaction on the database.
-func (bdc *boltDBCache) view(fn func(tx *bolt.Tx) error) (retErr error) {
+func (bdc *cache) view(fn func(tx *bolt.Tx) error) (retErr error) {
// bolt.Open(bdc.path, 0600, &bolt.Options{ReadOnly: true}) will, if the file does not exist,
// nevertheless create it, but with an O_RDONLY file descriptor, try to initialize it, and fail — while holding
// a read lock, blocking any future writes.
@@ -122,7 +125,7 @@ func (bdc *boltDBCache) view(fn func(tx *bolt.Tx) error) (retErr error) {
}
// update runs the specified fn within a read-write transaction on the database.
-func (bdc *boltDBCache) update(fn func(tx *bolt.Tx) error) (retErr error) {
+func (bdc *cache) update(fn func(tx *bolt.Tx) error) (retErr error) {
lockPath(bdc.path)
defer unlockPath(bdc.path)
db, err := bolt.Open(bdc.path, 0600, nil)
@@ -139,7 +142,7 @@ func (bdc *boltDBCache) update(fn func(tx *bolt.Tx) error) (retErr error) {
}
// uncompressedDigest implements BlobInfoCache.UncompressedDigest within the provided read-only transaction.
-func (bdc *boltDBCache) uncompressedDigest(tx *bolt.Tx, anyDigest digest.Digest) digest.Digest {
+func (bdc *cache) uncompressedDigest(tx *bolt.Tx, anyDigest digest.Digest) digest.Digest {
if b := tx.Bucket(uncompressedDigestBucket); b != nil {
if uncompressedBytes := b.Get([]byte(anyDigest.String())); uncompressedBytes != nil {
d, err := digest.Parse(string(uncompressedBytes))
@@ -166,7 +169,7 @@ func (bdc *boltDBCache) uncompressedDigest(tx *bolt.Tx, anyDigest digest.Digest)
// UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
// May return anyDigest if it is known to be uncompressed.
// Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
-func (bdc *boltDBCache) UncompressedDigest(anyDigest digest.Digest) digest.Digest {
+func (bdc *cache) UncompressedDigest(anyDigest digest.Digest) digest.Digest {
var res digest.Digest
if err := bdc.view(func(tx *bolt.Tx) error {
res = bdc.uncompressedDigest(tx, anyDigest)
@@ -182,7 +185,7 @@ func (bdc *boltDBCache) UncompressedDigest(anyDigest digest.Digest) digest.Diges
// WARNING: Only call this for LOCALLY VERIFIED data; don't record a digest pair just because some remote author claims so (e.g.
// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
-func (bdc *boltDBCache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) {
+func (bdc *cache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) {
_ = bdc.update(func(tx *bolt.Tx) error {
b, err := tx.CreateBucketIfNotExists(uncompressedDigestBucket)
if err != nil {
@@ -219,7 +222,7 @@ func (bdc *boltDBCache) RecordDigestUncompressedPair(anyDigest digest.Digest, un
// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
// and can be reused given the opaque location data.
-func (bdc *boltDBCache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) {
+func (bdc *cache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) {
_ = bdc.update(func(tx *bolt.Tx) error {
b, err := tx.CreateBucketIfNotExists(knownLocationsBucket)
if err != nil {
@@ -248,8 +251,8 @@ func (bdc *boltDBCache) RecordKnownLocation(transport types.ImageTransport, scop
}) // FIXME? Log error (but throttle the log volume on repeated accesses)?
}
-// appendReplacementCandiates creates candidateWithTime values for digest in scopeBucket, and returns the result of appending them to candidates.
-func (bdc *boltDBCache) appendReplacementCandidates(candidates []candidateWithTime, scopeBucket *bolt.Bucket, digest digest.Digest) []candidateWithTime {
+// appendReplacementCandidates creates prioritize.CandidateWithTime values for digest in scopeBucket, and returns the result of appending them to candidates.
+func (bdc *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, scopeBucket *bolt.Bucket, digest digest.Digest) []prioritize.CandidateWithTime {
b := scopeBucket.Bucket([]byte(digest.String()))
if b == nil {
return candidates
@@ -259,12 +262,12 @@ func (bdc *boltDBCache) appendReplacementCandidates(candidates []candidateWithTi
if err := t.UnmarshalBinary(v); err != nil {
return err
}
-candidates = append(candidates, candidateWithTime{
-candidate: types.BICReplacementCandidate{
+candidates = append(candidates, prioritize.CandidateWithTime{
+Candidate: types.BICReplacementCandidate{
Digest: digest,
Location: types.BICLocationReference{Opaque: string(k)},
},
-lastSeen: t,
+LastSeen: t,
})
return nil
}) // FIXME? Log error (but throttle the log volume on repeated accesses)?
@@ -277,8 +280,8 @@ func (bdc *boltDBCache) appendReplacementCandidates(candidates []candidateWithTi
// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
// uncompressed digest.
-func (bdc *boltDBCache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
-res := []candidateWithTime{}
+func (bdc *cache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
+res := []prioritize.CandidateWithTime{}
var uncompressedDigestValue digest.Digest // = ""
if err := bdc.view(func(tx *bolt.Tx) error {
scopeBucket := tx.Bucket(knownLocationsBucket)
@@ -325,5 +328,5 @@ func (bdc *boltDBCache) CandidateLocations(transport types.ImageTransport, scope
return []types.BICReplacementCandidate{} // FIXME? Log err (but throttle the log volume on repeated accesses)?
}
-return destructivelyPrioritizeReplacementCandidates(res, primaryDigest, uncompressedDigestValue)
+return prioritize.DestructivelyPrioritizeReplacementCandidates(res, primaryDigest, uncompressedDigestValue)
}


@@ -5,6 +5,8 @@ import (
"os"
"path/filepath"
+"github.com/containers/image/pkg/blobinfocache/boltdb"
+"github.com/containers/image/pkg/blobinfocache/memory"
"github.com/containers/image/types"
"github.com/sirupsen/logrus"
)
@@ -50,14 +52,14 @@ func DefaultCache(sys *types.SystemContext) types.BlobInfoCache {
dir, err := blobInfoCacheDir(sys, os.Geteuid())
if err != nil {
logrus.Debugf("Error determining a location for %s, using a memory-only cache", blobInfoCacheFilename)
-return NewMemoryCache()
+return memory.New()
}
path := filepath.Join(dir, blobInfoCacheFilename)
if err := os.MkdirAll(dir, 0700); err != nil {
logrus.Debugf("Error creating parent directories for %s, using a memory-only cache: %v", blobInfoCacheFilename, err)
-return NewMemoryCache()
+return memory.New()
}
logrus.Debugf("Using blob info cache at %s", path)
-return NewBoltDBCache(path)
+return boltdb.New(path)
}
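With the cache implementations split out of the blobinfocache package, callers choose between boltdb, memory, and none through ordinary imports. A minimal sketch of the resulting call pattern; the pickCache helper is illustrative, not part of this commit:

package example

import (
	"github.com/containers/image/pkg/blobinfocache"
	"github.com/containers/image/pkg/blobinfocache/memory"
	"github.com/containers/image/pkg/blobinfocache/none"
	"github.com/containers/image/types"
)

// pickCache is a hypothetical helper: persistent callers get the
// BoltDB-backed DefaultCache, tests can stay in memory, and one-shot
// config lookups can pass none.NoCache to skip caching entirely.
func pickCache(sys *types.SystemContext, persistent bool) types.BlobInfoCache {
	if !persistent {
		return memory.New()
	}
	// DefaultCache falls back to memory.New() itself if it cannot set up a file.
	return blobinfocache.DefaultCache(sys)
}

var _ types.BlobInfoCache = none.NoCache // dummy cache for Manifest.Inspect-style uses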


@@ -1,4 +1,6 @@
-package blobinfocache
+// Package prioritize provides utilities for prioritizing locations in
+// types.BlobInfoCache.CandidateLocations.
+package prioritize
import (
"sort"
@@ -13,16 +15,16 @@
// This is a heuristic/guess, and could well use a different value.
const replacementAttempts = 5
-// candidateWithTime is the input to types.BICReplacementCandidate prioritization.
-type candidateWithTime struct {
-candidate types.BICReplacementCandidate // The replacement candidate
-lastSeen time.Time // Time the candidate was last known to exist (either read or written)
+// CandidateWithTime is the input to types.BICReplacementCandidate prioritization.
+type CandidateWithTime struct {
+Candidate types.BICReplacementCandidate // The replacement candidate
+LastSeen time.Time // Time the candidate was last known to exist (either read or written)
}
// candidateSortState is a local state implementing sort.Interface on candidates to prioritize,
// along with the specially-treated digest values for the implementation of sort.Interface.Less
type candidateSortState struct {
-cs []candidateWithTime // The entries to sort
+cs []CandidateWithTime // The entries to sort
primaryDigest digest.Digest // The digest the user actually asked for
uncompressedDigest digest.Digest // The uncompressed digest corresponding to primaryDigest. May be "", or even equal to primaryDigest
}
@@ -40,35 +42,35 @@ func (css *candidateSortState) Less(i, j int) bool {
// Other digest values are primarily sorted by time (more recent first), secondarily by digest (to provide a deterministic order)
// First, deal with the primaryDigest/uncompressedDigest cases:
-if xi.candidate.Digest != xj.candidate.Digest {
+if xi.Candidate.Digest != xj.Candidate.Digest {
// - The two digests are different, and one (or both) of the digests is primaryDigest or uncompressedDigest: time does not matter
-if xi.candidate.Digest == css.primaryDigest {
+if xi.Candidate.Digest == css.primaryDigest {
return true
}
-if xj.candidate.Digest == css.primaryDigest {
+if xj.Candidate.Digest == css.primaryDigest {
return false
}
if css.uncompressedDigest != "" {
-if xi.candidate.Digest == css.uncompressedDigest {
+if xi.Candidate.Digest == css.uncompressedDigest {
return false
}
-if xj.candidate.Digest == css.uncompressedDigest {
+if xj.Candidate.Digest == css.uncompressedDigest {
return true
}
}
-} else { // xi.candidate.Digest == xj.candidate.Digest
+} else { // xi.Candidate.Digest == xj.Candidate.Digest
// The two digests are the same, and are either primaryDigest or uncompressedDigest: order by time
-if xi.candidate.Digest == css.primaryDigest || (css.uncompressedDigest != "" && xi.candidate.Digest == css.uncompressedDigest) {
-return xi.lastSeen.After(xj.lastSeen)
+if xi.Candidate.Digest == css.primaryDigest || (css.uncompressedDigest != "" && xi.Candidate.Digest == css.uncompressedDigest) {
+return xi.LastSeen.After(xj.LastSeen)
}
}
// Neither of the digests are primaryDigest/uncompressedDigest:
-if !xi.lastSeen.Equal(xj.lastSeen) { // Order primarily by time
-return xi.lastSeen.After(xj.lastSeen)
+if !xi.LastSeen.Equal(xj.LastSeen) { // Order primarily by time
+return xi.LastSeen.After(xj.LastSeen)
}
// Fall back to digest, if timestamps end up _exactly_ the same (how?!)
-return xi.candidate.Digest < xj.candidate.Digest
+return xi.Candidate.Digest < xj.Candidate.Digest
}
func (css *candidateSortState) Swap(i, j int) {
@@ -77,7 +79,7 @@ func (css *candidateSortState) Swap(i, j int) {
// destructivelyPrioritizeReplacementCandidatesWithMax is destructivelyPrioritizeReplacementCandidates with a parameter for the
// number of entries to limit, only to make testing simpler.
-func destructivelyPrioritizeReplacementCandidatesWithMax(cs []candidateWithTime, primaryDigest, uncompressedDigest digest.Digest, maxCandidates int) []types.BICReplacementCandidate {
+func destructivelyPrioritizeReplacementCandidatesWithMax(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest, maxCandidates int) []types.BICReplacementCandidate {
// We don't need to use sort.Stable() because nanosecond timestamps are (presumably?) unique, so no two elements should
// compare equal.
sort.Sort(&candidateSortState{
@@ -92,17 +94,17 @@ func destructivelyPrioritizeReplacementCandidatesWithMax(cs []candidateWithTime,
}
res := make([]types.BICReplacementCandidate, resLength)
for i := range res {
-res[i] = cs[i].candidate
+res[i] = cs[i].Candidate
}
return res
}
-// destructivelyPrioritizeReplacementCandidates consumes AND DESTROYS an array of possible replacement candidates with their last known existence times,
+// DestructivelyPrioritizeReplacementCandidates consumes AND DESTROYS an array of possible replacement candidates with their last known existence times,
// the primary digest the user actually asked for, and the corresponding uncompressed digest (if known, possibly equal to the primary digest),
// and returns an appropriately prioritized and/or trimmed result suitable for a return value from types.BlobInfoCache.CandidateLocations.
//
// WARNING: The array of candidates is destructively modified. (The implementation of this function could of course
// make a copy, but all CandidateLocations implementations build the slice of candidates only for the single purpose of calling this function anyway.)
-func destructivelyPrioritizeReplacementCandidates(cs []candidateWithTime, primaryDigest, uncompressedDigest digest.Digest) []types.BICReplacementCandidate {
+func DestructivelyPrioritizeReplacementCandidates(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest) []types.BICReplacementCandidate {
return destructivelyPrioritizeReplacementCandidatesWithMax(cs, primaryDigest, uncompressedDigest, replacementAttempts)
}
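prioritize sits under internal/, so only code inside the containers/image tree (such as the boltdb and memory packages in this commit) can import it. A hedged sketch of the intended call pattern, mirroring those packages; the package name, map argument, and values are invented:

package memorylike // hypothetical sibling of .../pkg/blobinfocache/memory

import (
	"time"

	"github.com/containers/image/pkg/blobinfocache/internal/prioritize"
	"github.com/containers/image/types"
	digest "github.com/opencontainers/go-digest"
)

// candidateSketch turns last-seen timestamps per location into the
// prioritized, trimmed slice a CandidateLocations implementation returns.
func candidateSketch(seen map[types.BICLocationReference]time.Time, primary digest.Digest) []types.BICReplacementCandidate {
	res := []prioritize.CandidateWithTime{}
	for loc, t := range seen {
		res = append(res, prioritize.CandidateWithTime{
			Candidate: types.BICReplacementCandidate{Digest: primary, Location: loc},
			LastSeen:  t,
		})
	}
	// Most recent first, primary digest preferred, trimmed to replacementAttempts entries.
	return prioritize.DestructivelyPrioritizeReplacementCandidates(res, primary, "")
}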


@@ -1,9 +1,11 @@
-package blobinfocache
+// Package memory implements an in-memory BlobInfoCache.
+package memory
import (
"sync"
"time"
"github.com/containers/image/pkg/blobinfocache/internal/prioritize"
"github.com/containers/image/types"
digest "github.com/opencontainers/go-digest"
"github.com/sirupsen/logrus"
@@ -16,8 +18,8 @@ type locationKey struct {
blobDigest digest.Digest
}
-// memoryCache implements an in-memory-only BlobInfoCache
-type memoryCache struct {
+// cache implements an in-memory-only BlobInfoCache
+type cache struct {
mutex sync.Mutex
// The following fields can only be accessed with mutex held.
uncompressedDigests map[digest.Digest]digest.Digest
@@ -25,12 +27,16 @@ type memoryCache struct {
knownLocations map[locationKey]map[types.BICLocationReference]time.Time // stores last known existence time for each location reference
}
-// NewMemoryCache returns a BlobInfoCache implementation which is in-memory only.
-// This is primarily intended for tests, but also used as a fallback if DefaultCache
-// can't determine, or set up, the location for a persistent cache.
-// Manual users of types.{ImageSource,ImageDestination} might also use this instead of a persistent cache.
-func NewMemoryCache() types.BlobInfoCache {
-return &memoryCache{
+// New returns a BlobInfoCache implementation which is in-memory only.
+//
+// This is primarily intended for tests, but also used as a fallback
+// if blobinfocache.DefaultCache can't determine, or set up, the
+// location for a persistent cache. Most users should use
+// blobinfocache.DefaultCache instead of calling this directly.
+// Manual users of types.{ImageSource,ImageDestination} might also use
+// this instead of a persistent cache.
+func New() types.BlobInfoCache {
+return &cache{
uncompressedDigests: map[digest.Digest]digest.Digest{},
digestsByUncompressed: map[digest.Digest]map[digest.Digest]struct{}{},
knownLocations: map[locationKey]map[types.BICLocationReference]time.Time{},
@@ -40,14 +46,14 @@ func NewMemoryCache() types.BlobInfoCache {
// UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
// May return anyDigest if it is known to be uncompressed.
// Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
-func (mem *memoryCache) UncompressedDigest(anyDigest digest.Digest) digest.Digest {
+func (mem *cache) UncompressedDigest(anyDigest digest.Digest) digest.Digest {
mem.mutex.Lock()
defer mem.mutex.Unlock()
return mem.uncompressedDigestLocked(anyDigest)
}
// uncompressedDigestLocked implements types.BlobInfoCache.UncompressedDigest, but must be called only with mem.mutex held.
-func (mem *memoryCache) uncompressedDigestLocked(anyDigest digest.Digest) digest.Digest {
+func (mem *cache) uncompressedDigestLocked(anyDigest digest.Digest) digest.Digest {
if d, ok := mem.uncompressedDigests[anyDigest]; ok {
return d
}
@@ -65,7 +71,7 @@ func (mem *memoryCache) uncompressedDigestLocked(anyDigest digest.Digest) digest
// WARNING: Only call this for LOCALLY VERIFIED data; don't record a digest pair just because some remote author claims so (e.g.
// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
-func (mem *memoryCache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) {
+func (mem *cache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) {
mem.mutex.Lock()
defer mem.mutex.Unlock()
if previous, ok := mem.uncompressedDigests[anyDigest]; ok && previous != uncompressed {
@@ -83,7 +89,7 @@ func (mem *memoryCache) RecordDigestUncompressedPair(anyDigest digest.Digest, un
// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
// and can be reused given the opaque location data.
-func (mem *memoryCache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) {
+func (mem *cache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) {
mem.mutex.Lock()
defer mem.mutex.Unlock()
key := locationKey{transport: transport.Name(), scope: scope, blobDigest: blobDigest}
@@ -95,16 +101,16 @@ func (mem *memoryCache) RecordKnownLocation(transport types.ImageTransport, scop
locationScope[location] = time.Now() // Possibly overwriting an older entry.
}
-// appendReplacementCandiates creates candidateWithTime values for (transport, scope, digest), and returns the result of appending them to candidates.
-func (mem *memoryCache) appendReplacementCandidates(candidates []candidateWithTime, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest) []candidateWithTime {
+// appendReplacementCandidates creates prioritize.CandidateWithTime values for (transport, scope, digest), and returns the result of appending them to candidates.
+func (mem *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest) []prioritize.CandidateWithTime {
locations := mem.knownLocations[locationKey{transport: transport.Name(), scope: scope, blobDigest: digest}] // nil if not present
for l, t := range locations {
-candidates = append(candidates, candidateWithTime{
-candidate: types.BICReplacementCandidate{
+candidates = append(candidates, prioritize.CandidateWithTime{
+Candidate: types.BICReplacementCandidate{
Digest: digest,
Location: l,
},
-lastSeen: t,
+LastSeen: t,
})
}
return candidates
@@ -116,10 +122,10 @@ func (mem *memoryCache) appendReplacementCandidates(candidates []candidateWithTi
// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
// uncompressed digest.
-func (mem *memoryCache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
+func (mem *cache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
mem.mutex.Lock()
defer mem.mutex.Unlock()
-res := []candidateWithTime{}
+res := []prioritize.CandidateWithTime{}
res = mem.appendReplacementCandidates(res, transport, scope, primaryDigest)
var uncompressedDigest digest.Digest // = ""
if canSubstitute {
@@ -135,5 +141,5 @@ func (mem *memoryCache) CandidateLocations(transport types.ImageTransport, scope
}
}
}
-return destructivelyPrioritizeReplacementCandidates(res, primaryDigest, uncompressedDigest)
+return prioritize.DestructivelyPrioritizeReplacementCandidates(res, primaryDigest, uncompressedDigest)
}
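A minimal usage sketch for the renamed constructor (not from the commit; the digests are invented stand-ins):

package main

import (
	"fmt"

	"github.com/containers/image/pkg/blobinfocache/memory"
	"github.com/opencontainers/go-digest"
)

func main() {
	cache := memory.New()

	compressed := digest.FromString("gzipped layer")       // stand-in for a real layer digest
	uncompressed := digest.FromString("uncompressed layer") // stand-in for its DiffID

	// Only record pairs that were verified locally (see the WARNING above).
	cache.RecordDigestUncompressedPair(compressed, uncompressed)

	fmt.Println(cache.UncompressedDigest(compressed)) // prints the uncompressed digest
}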


@@ -1,4 +1,5 @@
-package blobinfocache
+// Package none implements a dummy BlobInfoCache which records no data.
+package none
import (
"github.com/containers/image/types"
@@ -11,9 +12,10 @@ type noCache struct {
// NoCache implements BlobInfoCache by not recording any data.
//
-// This exists primarily for implementations of configGetter for Manifest.Inspect,
-// because configs only have one representation.
-// Any use of BlobInfoCache with blobs should usually use at least a short-lived cache.
+// This exists primarily for implementations of configGetter for
+// Manifest.Inspect, because configs only have one representation.
+// Any use of BlobInfoCache with blobs should usually use at least a
+// short-lived cache, ideally blobinfocache.DefaultCache.
var NoCache types.BlobInfoCache = noCache{}
// UncompressedDigest returns an uncompressed digest corresponding to anyDigest.


@@ -18,7 +18,7 @@ import (
"github.com/containers/image/image"
"github.com/containers/image/internal/tmpdir"
"github.com/containers/image/manifest"
-"github.com/containers/image/pkg/blobinfocache"
+"github.com/containers/image/pkg/blobinfocache/none"
"github.com/containers/image/types"
"github.com/containers/storage"
"github.com/containers/storage/pkg/archive"
@@ -595,12 +595,12 @@ func (s *storageImageDestination) Commit(ctx context.Context) error {
if !haveDiffID {
// Check if it's elsewhere and the caller just forgot to pass it to us in a PutBlob(),
// or to even check if we had it.
-// Use blobinfocache.NoCache to avoid a repeated DiffID lookup in the BlobInfoCache; a caller
+// Use none.NoCache to avoid a repeated DiffID lookup in the BlobInfoCache; a caller
// that relies on using a blob digest that has never been seen by the store had better call
// TryReusingBlob; not calling PutBlob already violates the documented API, so there's only
// so far we are going to accommodate that (if we should be doing that at all).
logrus.Debugf("looking for diffID for blob %+v", blob.Digest)
-has, _, err := s.TryReusingBlob(ctx, blob.BlobInfo, blobinfocache.NoCache, false)
+has, _, err := s.TryReusingBlob(ctx, blob.BlobInfo, none.NoCache, false)
if err != nil {
return errors.Wrapf(err, "error checking for a layer based on blob %q", blob.Digest.String())
}


@@ -4,6 +4,7 @@ package storage
import (
"fmt"
+"os"
"path/filepath"
"strings"
@@ -180,7 +181,10 @@ func (s *storageTransport) GetStore() (storage.Store, error) {
// Return the transport's previously-set store. If we don't have one
// of those, initialize one now.
if s.store == nil {
-options := storage.DefaultStoreOptions
+options, err := storage.DefaultStoreOptions(os.Getuid() != 0, os.Getuid())
+if err != nil {
+return nil, err
+}
options.UIDMap = s.defaultUIDMap
options.GIDMap = s.defaultGIDMap
store, err := storage.GetStore(options)
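For other consumers of containers/storage the API change is the same as in this hunk: DefaultStoreOptions is now a function taking (rootless, rootlessUid) and returning an error. A short, hedged sketch of a standalone caller:

package main

import (
	"fmt"
	"os"

	"github.com/containers/storage"
)

func main() {
	rootless := os.Getuid() != 0
	// DefaultStoreOptions now computes RunRoot/GraphRoot per caller:
	// /var/{run,lib}/containers/storage for root, XDG-based paths otherwise.
	opts, err := storage.DefaultStoreOptions(rootless, os.Getuid())
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println("run root:  ", opts.RunRoot)
	fmt.Println("graph root:", opts.GraphRoot)
}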


@@ -1,7 +1,7 @@
github.com/containers/image
github.com/sirupsen/logrus v1.0.0
-github.com/containers/storage master
+github.com/containers/storage v1.12.1
github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76
github.com/docker/docker-credential-helpers d68f9aeca33f5fd3f08eeae5e9d175edf4e731d1
github.com/docker/distribution 5f6282db7d65e6d72ad7c2cc66310724a57be716


@@ -19,6 +19,7 @@ import (
"syscall"
"time"
+"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/pools"
"github.com/containers/storage/pkg/system"
rsystem "github.com/opencontainers/runc/libcontainer/system"
@@ -212,7 +213,7 @@ func DirCopy(srcDir, dstDir string, copyMode Mode, copyXattrs bool) error {
return nil
}
-if err := os.Lchown(dstPath, int(stat.Uid), int(stat.Gid)); err != nil {
+if err := idtools.SafeLchown(dstPath, int(stat.Uid), int(stat.Gid)); err != nil {
return err
}


@@ -636,7 +636,7 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
if chownOpts == nil {
chownOpts = &idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid}
}
-if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil {
+if err := idtools.SafeLchown(path, chownOpts.UID, chownOpts.GID); err != nil {
return err
}
}


@@ -7,6 +7,7 @@ import (
"strings"
"syscall"
+"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/system"
"golang.org/x/sys/unix"
)
@@ -130,7 +131,7 @@ func (overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool,
if err := unix.Mknod(originalPath, unix.S_IFCHR, 0); err != nil {
return false, err
}
-if err := os.Chown(originalPath, hdr.Uid, hdr.Gid); err != nil {
+if err := idtools.SafeChown(originalPath, hdr.Uid, hdr.Gid); err != nil {
return false, err
}


@@ -7,6 +7,9 @@ import (
"sort"
"strconv"
"strings"
+"syscall"
+
+"github.com/pkg/errors"
)
// IDMap contains a single entry for user namespace range remapping. An array
@@ -277,3 +280,18 @@ func parseSubidFile(path, username string) (ranges, error) {
}
return rangeList, nil
}
+
+func checkChownErr(err error, name string, uid, gid int) error {
+if e, ok := err.(*os.PathError); ok && e.Err == syscall.EINVAL {
+return errors.Wrapf(err, "there might not be enough IDs available in the namespace (requested %d:%d for %s)", uid, gid, name)
+}
+return err
+}
+
+func SafeChown(name string, uid, gid int) error {
+return checkChownErr(os.Chown(name, uid, gid), name, uid, gid)
+}
+
+func SafeLchown(name string, uid, gid int) error {
+return checkChownErr(os.Lchown(name, uid, gid), name, uid, gid)
+}


@@ -30,7 +30,7 @@ func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chown
paths = []string{path}
} else if err == nil && chownExisting {
// short-circuit--we were called with an existing directory and chown was requested
-return os.Chown(path, ownerUID, ownerGID)
+return SafeChown(path, ownerUID, ownerGID)
} else if err == nil {
// nothing to do; directory path fully exists already and chown was NOT requested
return nil
@@ -60,7 +60,7 @@ func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chown
// even if it existed, we will chown the requested path + any subpaths that
// didn't exist when we called MkdirAll
for _, pathComponent := range paths {
-if err := os.Chown(pathComponent, ownerUID, ownerGID); err != nil {
+if err := SafeChown(pathComponent, ownerUID, ownerGID); err != nil {
return err
}
}


@@ -32,7 +32,7 @@ import (
var (
// DefaultStoreOptions is a reasonable default set of options.
-DefaultStoreOptions StoreOptions
+defaultStoreOptions StoreOptions
stores []*store
storesLock sync.Mutex
)
@@ -550,7 +550,7 @@ type store struct {
// }
func GetStore(options StoreOptions) (Store, error) {
if options.RunRoot == "" && options.GraphRoot == "" && options.GraphDriverName == "" && len(options.GraphDriverOptions) == 0 {
-options = DefaultStoreOptions
+options = defaultStoreOptions
}
if options.GraphRoot != "" {
@@ -3217,8 +3217,20 @@ func copyStringInterfaceMap(m map[string]interface{}) map[string]interface{} {
return ret
}
-// DefaultConfigFile path to the system wide storage.conf file
-const DefaultConfigFile = "/etc/containers/storage.conf"
+// defaultConfigFile is the path to the system-wide storage.conf file
+const defaultConfigFile = "/etc/containers/storage.conf"
+
+// DefaultConfigFile returns the path to the storage config file used
+func DefaultConfigFile(rootless bool) (string, error) {
+if rootless {
+home, err := homeDir()
+if err != nil {
+return "", errors.Wrapf(err, "cannot determine users homedir")
+}
+return filepath.Join(home, ".config/containers/storage.conf"), nil
+}
+return defaultConfigFile, nil
+}
// TOML-friendly explicit tables used for conversions.
type tomlConfig struct {
@@ -3358,19 +3370,19 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) {
}
func init() {
-DefaultStoreOptions.RunRoot = "/var/run/containers/storage"
-DefaultStoreOptions.GraphRoot = "/var/lib/containers/storage"
-DefaultStoreOptions.GraphDriverName = ""
+defaultStoreOptions.RunRoot = "/var/run/containers/storage"
+defaultStoreOptions.GraphRoot = "/var/lib/containers/storage"
+defaultStoreOptions.GraphDriverName = ""
-ReloadConfigurationFile(DefaultConfigFile, &DefaultStoreOptions)
+ReloadConfigurationFile(defaultConfigFile, &defaultStoreOptions)
}
func GetDefaultMountOptions() ([]string, error) {
mountOpts := []string{
".mountopt",
-fmt.Sprintf("%s.mountopt", DefaultStoreOptions.GraphDriverName),
+fmt.Sprintf("%s.mountopt", defaultStoreOptions.GraphDriverName),
}
-for _, option := range DefaultStoreOptions.GraphDriverOptions {
+for _, option := range defaultStoreOptions.GraphDriverOptions {
key, val, err := parsers.ParseKeyValueOpt(option)
if err != nil {
return nil, err
vendor/github.com/containers/storage/utils.go (generated, vendored, new file, 234 lines added)

@@ -0,0 +1,234 @@
package storage
import (
"fmt"
"os"
"os/exec"
"os/user"
"path/filepath"
"strings"
"github.com/BurntSushi/toml"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/system"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// ParseIDMapping takes ID mappings and subuid/subgid maps and returns a storage mapping
func ParseIDMapping(UIDMapSlice, GIDMapSlice []string, subUIDMap, subGIDMap string) (*IDMappingOptions, error) {
options := IDMappingOptions{
HostUIDMapping: true,
HostGIDMapping: true,
}
if subGIDMap == "" && subUIDMap != "" {
subGIDMap = subUIDMap
}
if subUIDMap == "" && subGIDMap != "" {
subUIDMap = subGIDMap
}
if len(GIDMapSlice) == 0 && len(UIDMapSlice) != 0 {
GIDMapSlice = UIDMapSlice
}
if len(UIDMapSlice) == 0 && len(GIDMapSlice) != 0 {
UIDMapSlice = GIDMapSlice
}
if len(UIDMapSlice) == 0 && subUIDMap == "" && os.Getuid() != 0 {
UIDMapSlice = []string{fmt.Sprintf("0:%d:1", os.Getuid())}
}
if len(GIDMapSlice) == 0 && subGIDMap == "" && os.Getuid() != 0 {
GIDMapSlice = []string{fmt.Sprintf("0:%d:1", os.Getgid())}
}
if subUIDMap != "" && subGIDMap != "" {
mappings, err := idtools.NewIDMappings(subUIDMap, subGIDMap)
if err != nil {
return nil, errors.Wrapf(err, "failed to create NewIDMappings for uidmap=%s gidmap=%s", subUIDMap, subGIDMap)
}
options.UIDMap = mappings.UIDs()
options.GIDMap = mappings.GIDs()
}
parsedUIDMap, err := idtools.ParseIDMap(UIDMapSlice, "UID")
if err != nil {
return nil, errors.Wrapf(err, "failed to create ParseUIDMap UID=%s", UIDMapSlice)
}
parsedGIDMap, err := idtools.ParseIDMap(GIDMapSlice, "GID")
if err != nil {
return nil, errors.Wrapf(err, "failed to create ParseGIDMap GID=%s", UIDMapSlice)
}
options.UIDMap = append(options.UIDMap, parsedUIDMap...)
options.GIDMap = append(options.GIDMap, parsedGIDMap...)
if len(options.UIDMap) > 0 {
options.HostUIDMapping = false
}
if len(options.GIDMap) > 0 {
options.HostGIDMapping = false
}
return &options, nil
}
// GetRootlessRuntimeDir returns the runtime directory when running as non root
func GetRootlessRuntimeDir(rootlessUid int) (string, error) {
runtimeDir := os.Getenv("XDG_RUNTIME_DIR")
if runtimeDir == "" {
tmpDir := fmt.Sprintf("/run/user/%d", rootlessUid)
st, err := system.Stat(tmpDir)
if err == nil && int(st.UID()) == os.Getuid() && st.Mode() == 0700 {
return tmpDir, nil
}
}
tmpDir := fmt.Sprintf("%s/%d", os.TempDir(), rootlessUid)
if err := os.MkdirAll(tmpDir, 0700); err != nil {
logrus.Errorf("failed to create %s: %v", tmpDir, err)
} else {
return tmpDir, nil
}
home, err := homeDir()
if err != nil {
return "", errors.Wrapf(err, "neither XDG_RUNTIME_DIR nor HOME was set non-empty")
}
resolvedHome, err := filepath.EvalSymlinks(home)
if err != nil {
return "", errors.Wrapf(err, "cannot resolve %s", home)
}
return filepath.Join(resolvedHome, "rundir"), nil
}
// getRootlessDirInfo returns the parent path of where the storage for containers and
// volumes will be in rootless mode
func getRootlessDirInfo(rootlessUid int) (string, string, error) {
rootlessRuntime, err := GetRootlessRuntimeDir(rootlessUid)
if err != nil {
return "", "", err
}
dataDir := os.Getenv("XDG_DATA_HOME")
if dataDir == "" {
home, err := homeDir()
if err != nil {
return "", "", errors.Wrapf(err, "neither XDG_DATA_HOME nor HOME was set non-empty")
}
// runc doesn't like symlinks in the rootfs path, and at least
// on CoreOS /home is a symlink to /var/home, so resolve any symlink.
resolvedHome, err := filepath.EvalSymlinks(home)
if err != nil {
return "", "", errors.Wrapf(err, "cannot resolve %s", home)
}
dataDir = filepath.Join(resolvedHome, ".local", "share")
}
return dataDir, rootlessRuntime, nil
}
// getRootlessStorageOpts returns the storage opts for containers running as non root
func getRootlessStorageOpts(rootlessUid int) (StoreOptions, error) {
var opts StoreOptions
dataDir, rootlessRuntime, err := getRootlessDirInfo(rootlessUid)
if err != nil {
return opts, err
}
opts.RunRoot = rootlessRuntime
opts.GraphRoot = filepath.Join(dataDir, "containers", "storage")
if path, err := exec.LookPath("fuse-overlayfs"); err == nil {
opts.GraphDriverName = "overlay"
opts.GraphDriverOptions = []string{fmt.Sprintf("overlay.mount_program=%s", path)}
} else {
opts.GraphDriverName = "vfs"
}
return opts, nil
}
type tomlOptionsConfig struct {
MountProgram string `toml:"mount_program"`
}
func getTomlStorage(storeOptions *StoreOptions) *tomlConfig {
config := new(tomlConfig)
config.Storage.Driver = storeOptions.GraphDriverName
config.Storage.RunRoot = storeOptions.RunRoot
config.Storage.GraphRoot = storeOptions.GraphRoot
for _, i := range storeOptions.GraphDriverOptions {
s := strings.Split(i, "=")
if s[0] == "overlay.mount_program" {
config.Storage.Options.MountProgram = s[1]
}
}
return config
}
// DefaultStoreOptions returns the default storage options for containers
func DefaultStoreOptions(rootless bool, rootlessUid int) (StoreOptions, error) {
var (
defaultRootlessRunRoot string
defaultRootlessGraphRoot string
err error
)
storageOpts := defaultStoreOptions
if rootless {
storageOpts, err = getRootlessStorageOpts(rootlessUid)
if err != nil {
return storageOpts, err
}
}
storageConf, err := DefaultConfigFile(rootless)
if err != nil {
return storageOpts, err
}
if _, err = os.Stat(storageConf); err == nil {
defaultRootlessRunRoot = storageOpts.RunRoot
defaultRootlessGraphRoot = storageOpts.GraphRoot
storageOpts = StoreOptions{}
ReloadConfigurationFile(storageConf, &storageOpts)
}
if !os.IsNotExist(err) {
return storageOpts, errors.Wrapf(err, "cannot stat %s", storageConf)
}
if rootless {
if err == nil {
// If the file did not specify a graphroot or runroot,
// set sane defaults so we don't try and use root-owned
// directories
if storageOpts.RunRoot == "" {
storageOpts.RunRoot = defaultRootlessRunRoot
}
if storageOpts.GraphRoot == "" {
storageOpts.GraphRoot = defaultRootlessGraphRoot
}
} else {
if err := os.MkdirAll(filepath.Dir(storageConf), 0755); err != nil {
return storageOpts, errors.Wrapf(err, "cannot make directory %s", filepath.Dir(storageConf))
}
file, err := os.OpenFile(storageConf, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666)
if err != nil {
return storageOpts, errors.Wrapf(err, "cannot open %s", storageConf)
}
tomlConfiguration := getTomlStorage(&storageOpts)
defer file.Close()
enc := toml.NewEncoder(file)
if err := enc.Encode(tomlConfiguration); err != nil {
os.Remove(storageConf)
return storageOpts, errors.Wrapf(err, "failed to encode %s", storageConf)
}
}
}
return storageOpts, nil
}
func homeDir() (string, error) {
home := os.Getenv("HOME")
if home == "" {
usr, err := user.Current()
if err != nil {
return "", errors.Wrapf(err, "neither XDG_RUNTIME_DIR nor HOME was set non-empty")
}
home = usr.HomeDir
}
return home, nil
}
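A hedged sketch of driving the new ParseIDMapping helper from outside the library; the mapping triples are invented examples of the container:host:length format:

package main

import (
	"fmt"

	"github.com/containers/storage"
)

func main() {
	opts, err := storage.ParseIDMapping(
		[]string{"0:100000:65536"}, // UID map: container 0 -> host 100000, length 65536
		[]string{"0:100000:65536"}, // GID map, same shape
		"", "",                     // no /etc/subuid- and /etc/subgid-style usernames
	)
	if err != nil {
		panic(err)
	}
	// With explicit maps provided, host ID mapping is disabled.
	fmt.Println(opts.HostUIDMapping, len(opts.UIDMap))
}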