package storage

import (
	"fmt"
	"os"
	"path/filepath"
	"slices"
	"strings"
	"sync"
	"time"

	"github.com/containers/storage/pkg/ioutils"
	"github.com/containers/storage/pkg/lockfile"
	"github.com/containers/storage/pkg/stringid"
	"github.com/containers/storage/pkg/stringutils"
	"github.com/containers/storage/pkg/truncindex"
	digest "github.com/opencontainers/go-digest"
	"github.com/sirupsen/logrus"
)

const (
	// ImageDigestManifestBigDataNamePrefix is a prefix of big data item
	// names which we consider to be manifests, used for computing a
	// "digest" value for the image as a whole, by which we can locate the
	// image later.
	ImageDigestManifestBigDataNamePrefix = "manifest"
	// ImageDigestBigDataKey is provided for compatibility with older
	// versions of the image library. It will be removed in the future.
	ImageDigestBigDataKey = "manifest"
)

// An Image is a reference to a layer and an associated metadata string.
type Image struct {
	// ID is either one which was specified at create-time, or a random
	// value which was generated by the library.
	ID string `json:"id"`

	// Digest is a digest value that we can use to locate the image, if one
	// was specified at creation-time.
	Digest digest.Digest `json:"digest,omitempty"`

	// Digests is a list of digest values of the image's manifests, and
	// possibly a manually-specified value, that we can use to locate the
	// image. If Digest is set, its value is also in this list.
	Digests []digest.Digest `json:"-"`

	// Names is an optional set of user-defined convenience values. The
	// image can be referred to by its ID or any of its names. Names are
	// unique among images, and are often the text representation of tagged
	// or canonical references.
	Names []string `json:"names,omitempty"`

	// NamesHistory is an optional set of Names the image had in the past.
	// The list is free of duplicates, and the newest entry is the first one.
	NamesHistory []string `json:"names-history,omitempty"`

	// TopLayer is the ID of the topmost layer of the image itself, if the
	// image contains one or more layers. Multiple images can refer to the
	// same top layer.
	TopLayer string `json:"layer,omitempty"`

	// MappedTopLayers are the IDs of alternate versions of the top layer
	// which have the same contents and parent, and which differ from
	// TopLayer only in which ID mappings they use. When the image is
	// to be removed, they should be removed before the TopLayer, as the
	// graph driver may depend on that.
	MappedTopLayers []string `json:"mapped-layers,omitempty"`

	// Metadata is data we keep for the convenience of the caller. It is not
	// expected to be large, since it is kept in memory.
	Metadata string `json:"metadata,omitempty"`

	// BigDataNames is a list of names of data items that we keep for the
	// convenience of the caller. They can be large, and are only in
	// memory when being read from or written to disk.
	BigDataNames []string `json:"big-data-names,omitempty"`

	// BigDataSizes maps the names in BigDataNames to the sizes of the data
	// that has been stored, if they're known.
	BigDataSizes map[string]int64 `json:"big-data-sizes,omitempty"`

	// BigDataDigests maps the names in BigDataNames to the digests of the
	// data that has been stored, if they're known.
	BigDataDigests map[string]digest.Digest `json:"big-data-digests,omitempty"`

	// Created is the datestamp for when this image was created. Older
	// versions of the library did not track this information, so callers
	// will likely want to use the IsZero() method to verify that a value
	// is set before using it.
	Created time.Time `json:"created,omitempty"`

	// ReadOnly is true if this image resides in a read-only layer store.
	ReadOnly bool `json:"-"`

	Flags map[string]any `json:"flags,omitempty"`
}

// roImageStore provides bookkeeping for information about Images.
type roImageStore interface {
	roMetadataStore
	roBigDataStore

	// startReading makes sure the store is fresh, and locks it for reading.
	// If this succeeds, the caller MUST call stopReading().
	startReading() error

	// stopReading releases locks obtained by startReading.
	stopReading()

	// Exists checks if there is an image with the given ID or name.
	Exists(id string) bool

	// Get retrieves information about an image given an ID or name.
	Get(id string) (*Image, error)

	// Images returns a slice enumerating the known images.
	Images() ([]Image, error)

	// ByDigest returns a slice enumerating the images which have either an
	// explicitly-set digest, or a big data item with a name that starts
	// with ImageDigestManifestBigDataNamePrefix, which matches the
	// specified digest.
	ByDigest(d digest.Digest) ([]*Image, error)
}
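
// A minimal usage sketch of the read-side API above, assuming code inside this
// package (the interface is unexported). This is not part of the upstream API
// surface, and the helper name is hypothetical.
func exampleListImages(store roImageStore) ([]Image, error) {
	// startReading refreshes in-memory state from disk if needed and takes
	// the read locks; the matching stopReading is mandatory on success.
	if err := store.startReading(); err != nil {
		return nil, err
	}
	defer store.stopReading()
	return store.Images()
}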

// rwImageStore provides bookkeeping for information about Images.
type rwImageStore interface {
	roImageStore
	rwMetadataStore
	rwImageBigDataStore
	flaggableStore

	// startWriting makes sure the store is fresh, and locks it for writing.
	// If this succeeds, the caller MUST call stopWriting().
	startWriting() error

	// stopWriting releases locks obtained by startWriting.
	stopWriting()

	// create creates an image that has a specified ID (or a random one) and
	// optional names, using the specified layer as its topmost (hopefully
	// read-only) layer. That layer can be referenced by multiple images.
	create(id string, names []string, layer string, options ImageOptions) (*Image, error)

	// updateNames modifies names associated with an image based on (op, names).
	// The values are expected to be valid normalized named image references.
	updateNames(id string, names []string, op updateNameOperation) error

	// Delete removes the record of the image.
	Delete(id string) error

	addMappedTopLayer(id, layer string) error
	removeMappedTopLayer(id, layer string) error

	// Clean up unreferenced per-image data.
	GarbageCollect() error

	// Wipe removes records of all images.
	Wipe() error
}
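
// A minimal write-side sketch, again assuming in-package access. The helper
// name and the empty ImageOptions value are illustrative only; ImageOptions is
// defined elsewhere in this package, and create() fills in defaults (such as
// the creation time) for fields left unset.
func exampleCreateImage(store rwImageStore, name, topLayerID string) (*Image, error) {
	if err := store.startWriting(); err != nil {
		return nil, err
	}
	defer store.stopWriting()
	// An empty ID asks create() to generate a random unused one.
	return store.create("", []string{name}, topLayerID, ImageOptions{})
}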

type imageStore struct {
	// The following fields are only set when constructing imageStore, and must never be modified afterwards.
	// They are safe to access without any other locking.
	lockfile *lockfile.LockFile // lockfile.IsReadWrite can be used to distinguish between read-write and read-only image stores.
	dir      string

	inProcessLock sync.RWMutex // Can _only_ be obtained with lockfile held.
	// The following fields can only be read/written with read/write ownership of inProcessLock, respectively.
	// Almost all users should use startReading() or startWriting().
	lastWrite lockfile.LastWrite
	images    []*Image
	idindex   *truncindex.TruncIndex
	byid      map[string]*Image
	byname    map[string]*Image
	bydigest  map[digest.Digest][]*Image
}

func copyImage(i *Image) *Image {
	return &Image{
		ID:              i.ID,
		Digest:          i.Digest,
		Digests:         copySlicePreferringNil(i.Digests),
		Names:           copySlicePreferringNil(i.Names),
		NamesHistory:    copySlicePreferringNil(i.NamesHistory),
		TopLayer:        i.TopLayer,
		MappedTopLayers: copySlicePreferringNil(i.MappedTopLayers),
		Metadata:        i.Metadata,
		BigDataNames:    copySlicePreferringNil(i.BigDataNames),
		BigDataSizes:    copyMapPreferringNil(i.BigDataSizes),
		BigDataDigests:  copyMapPreferringNil(i.BigDataDigests),
		Created:         i.Created,
		ReadOnly:        i.ReadOnly,
		Flags:           copyMapPreferringNil(i.Flags),
	}
}

func copyImageSlice(slice []*Image) []*Image {
	if len(slice) > 0 {
		cp := make([]*Image, len(slice))
		for i := range slice {
			cp[i] = copyImage(slice[i])
		}
		return cp
	}
	return nil
}

// startWritingWithReload makes sure the store is fresh if canReload, and locks it for writing.
// If this succeeds, the caller MUST call stopWriting().
//
// This is an internal implementation detail of imageStore construction, every other caller
// should use startWriting() instead.
func (r *imageStore) startWritingWithReload(canReload bool) error {
	r.lockfile.Lock()
	r.inProcessLock.Lock()
	succeeded := false
	defer func() {
		if !succeeded {
			r.inProcessLock.Unlock()
			r.lockfile.Unlock()
		}
	}()

	if canReload {
		if _, err := r.reloadIfChanged(true); err != nil {
			return err
		}
	}

	succeeded = true
	return nil
}

// startWriting makes sure the store is fresh, and locks it for writing.
// If this succeeds, the caller MUST call stopWriting().
func (r *imageStore) startWriting() error {
	return r.startWritingWithReload(true)
}

// stopWriting releases locks obtained by startWriting.
func (r *imageStore) stopWriting() {
	r.inProcessLock.Unlock()
	r.lockfile.Unlock()
}

// startReadingWithReload makes sure the store is fresh if canReload, and locks it for reading.
// If this succeeds, the caller MUST call stopReading().
//
// This is an internal implementation detail of imageStore construction, every other caller
// should use startReading() instead.
func (r *imageStore) startReadingWithReload(canReload bool) error {
	// inProcessLocked calls the nested function with r.inProcessLock held for writing.
	inProcessLocked := func(fn func() error) error {
		r.inProcessLock.Lock()
		defer r.inProcessLock.Unlock()
		return fn()
	}

	r.lockfile.RLock()
	unlockFn := r.lockfile.Unlock // A function to call to clean up, or nil
	defer func() {
		if unlockFn != nil {
			unlockFn()
		}
	}()
	r.inProcessLock.RLock()
	unlockFn = r.stopReading

	if canReload {
		// If we are lucky, we can just hold the read locks, check that we are fresh, and continue.
		_, modified, err := r.modified()
		if err != nil {
			return err
		}
		if modified {
			// We are unlucky, and need to reload.
			// NOTE: Multiple goroutines can get to this place approximately simultaneously.
			r.inProcessLock.RUnlock()
			unlockFn = r.lockfile.Unlock

			// r.lastWrite can change at this point if another goroutine reloads the store before us. That’s why we don’t unconditionally
			// trigger a load below; we (lock and) reloadIfChanged() again.

			// First try reloading with r.lockfile held for reading.
			// r.inProcessLock will serialize all goroutines that got here;
			// each will re-check on-disk state vs. r.lastWrite, and the first one will actually reload the data.
			var tryLockedForWriting bool
			if err := inProcessLocked(func() error {
				// We could optimize this further: The r.lockfile.GetLastWrite() value shouldn’t change as long as we hold r.lockfile,
				// so if r.lastWrite was already updated, we don’t need to actually read the on-filesystem lock.
				var err error
				tryLockedForWriting, err = r.reloadIfChanged(false)
				return err
			}); err != nil {
				if !tryLockedForWriting {
					return err
				}
				// Not good enough, we need r.lockfile held for writing. So, let’s do that.
				unlockFn()
				unlockFn = nil

				r.lockfile.Lock()
				unlockFn = r.lockfile.Unlock
				if err := inProcessLocked(func() error {
					_, err := r.reloadIfChanged(true)
					return err
				}); err != nil {
					return err
				}
				unlockFn()
				unlockFn = nil

				r.lockfile.RLock()
				unlockFn = r.lockfile.Unlock
				// We need to check for a reload once more because the on-disk state could have been modified
				// after we released the lock.
				// If that, _again_, finds inconsistent state, just give up.
				// We could, plausibly, retry a few times, but that inconsistent state (duplicate image names)
				// shouldn’t be saved (by correct implementations) in the first place.
				if err := inProcessLocked(func() error {
					_, err := r.reloadIfChanged(false)
					return err
				}); err != nil {
					return fmt.Errorf("(even after successfully cleaning up once:) %w", err)
				}
			}

			// NOTE that we hold neither a read nor write inProcessLock at this point. That’s fine in ordinary operation, because
			// the on-filesystem r.lockfile should protect us against (cooperating) writers, and any use of r.inProcessLock
			// protects us against in-process writers modifying data.
			// In presence of non-cooperating writers, we just ensure that 1) the in-memory data is not clearly out-of-date
			// and 2) access to the in-memory data is not racy;
			// but we can’t protect against those out-of-process writers modifying _files_ while we are assuming they are in a consistent state.

			r.inProcessLock.RLock()
		}
	}

	unlockFn = nil
	return nil
}

// startReading makes sure the store is fresh, and locks it for reading.
// If this succeeds, the caller MUST call stopReading().
func (r *imageStore) startReading() error {
	return r.startReadingWithReload(true)
}

// stopReading releases locks obtained by startReading.
func (r *imageStore) stopReading() {
	r.inProcessLock.RUnlock()
	r.lockfile.Unlock()
}

// modified returns true if the on-disk state has changed (i.e. if reloadIfChanged may need to modify the store),
// and a lockfile.LastWrite value for that update.
//
// The caller must hold r.lockfile for reading _or_ writing.
// The caller must hold r.inProcessLock for reading or writing.
func (r *imageStore) modified() (lockfile.LastWrite, bool, error) {
	return r.lockfile.ModifiedSince(r.lastWrite)
}

// reloadIfChanged reloads the contents of the store from disk if it is changed.
//
// The caller must hold r.lockfile for reading _or_ writing; lockedForWriting is true
// if it is held for writing.
//
// The caller must hold r.inProcessLock for WRITING.
//
// If !lockedForWriting and this function fails, the return value indicates whether
// reloadIfChanged() with lockedForWriting could succeed.
func (r *imageStore) reloadIfChanged(lockedForWriting bool) (bool, error) {
	lastWrite, modified, err := r.modified()
	if err != nil {
		return false, err
	}
	// We require callers to always hold r.inProcessLock for WRITING, even if they might not end up calling r.load()
	// and modify no fields, to ensure they see fresh data:
	// r.lockfile.Modified() only returns true once per change. Without an exclusive lock,
	// one goroutine might see r.lockfile.Modified() == true and decide to load, and in the meanwhile another one could
	// see r.lockfile.Modified() == false and proceed to use in-memory data without noticing it is stale.
	if modified {
		if tryLockedForWriting, err := r.load(lockedForWriting); err != nil {
			return tryLockedForWriting, err // r.lastWrite is unchanged, so we will load the next time again.
		}
		r.lastWrite = lastWrite
	}
	return false, nil
}

// Requires startReading or startWriting.
func (r *imageStore) Images() ([]Image, error) {
	images := make([]Image, len(r.images))
	for i := range r.images {
		images[i] = *copyImage(r.images[i])
	}
	return images, nil
}

// GarbageCollect looks for datadirs in the store directory that are not
// referenced by the json file and removes them. These can happen in the case
// of unclean shutdowns.
// Requires startReading or startWriting.
func (r *imageStore) GarbageCollect() error {
	entries, err := os.ReadDir(r.dir)
	if err != nil {
		// Unexpected, don't try any GC
		return err
	}

	for _, entry := range entries {
		id := entry.Name()
		// Does it look like a datadir directory?
		if !entry.IsDir() || stringid.ValidateID(id) != nil {
			continue
		}

		// Should the id be there?
		if r.byid[id] != nil {
			continue
		}

		// Otherwise remove datadir
		logrus.Debugf("removing %q", filepath.Join(r.dir, id))
		moreErr := os.RemoveAll(filepath.Join(r.dir, id))
		// Propagate first error
		if moreErr != nil && err == nil {
			err = moreErr
		}
	}

	return err
}

func (r *imageStore) imagespath() string {
	return filepath.Join(r.dir, "images.json")
}

func (r *imageStore) datadir(id string) string {
	return filepath.Join(r.dir, id)
}

func (r *imageStore) datapath(id, key string) string {
	return filepath.Join(r.datadir(id), makeBigDataBaseName(key))
}
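
// For orientation, the on-disk layout under r.dir looks roughly like the
// following sketch; the exact big data file names are produced by
// makeBigDataBaseName, which is defined elsewhere in this package:
//
//	images.json         - the serialized image records (imagespath())
//	<image-id>/         - per-image data directory (datadir(id))
//	<image-id>/manifest - a big data item (datapath(id, "manifest"))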

// bigDataNameIsManifest determines if a big data item with the specified name
// is considered to be representative of the image, in that its digest can be
// said to also be the image's digest. Currently, if its name is, or begins
// with, "manifest", we say that it is.
func bigDataNameIsManifest(name string) bool {
	return strings.HasPrefix(name, ImageDigestManifestBigDataNamePrefix)
}

// recomputeDigests takes a fixed digest and a name-to-digest map and builds a
// list of the unique values that would identify the image.
// The caller must hold r.inProcessLock for writing.
func (i *Image) recomputeDigests() error {
	validDigests := make([]digest.Digest, 0, len(i.BigDataDigests)+1)
	digests := make(map[digest.Digest]struct{})
	if i.Digest != "" {
		if err := i.Digest.Validate(); err != nil {
			return fmt.Errorf("validating image digest %q: %w", string(i.Digest), err)
		}
		digests[i.Digest] = struct{}{}
		validDigests = append(validDigests, i.Digest)
	}
	for name, digest := range i.BigDataDigests {
		if !bigDataNameIsManifest(name) {
			continue
		}
		if err := digest.Validate(); err != nil {
			return fmt.Errorf("validating digest %q for big data item %q: %w", string(digest), name, err)
		}
		// Deduplicate the digest values.
		if _, known := digests[digest]; !known {
			digests[digest] = struct{}{}
			validDigests = append(validDigests, digest)
		}
	}
	if i.Digest == "" && len(validDigests) > 0 {
		i.Digest = validDigests[0]
	}
	i.Digests = validDigests
	return nil
}
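
// A small illustrative sketch of what recomputeDigests does (the helper and
// its data are hypothetical): only manifest-named big data items contribute to
// Digests, duplicates are dropped, and an unset Digest is filled in from the
// first valid value.
func exampleImageDigests() ([]digest.Digest, error) {
	img := &Image{
		BigDataDigests: map[string]digest.Digest{
			"manifest": digest.Canonical.FromString("hypothetical manifest bytes"),
			"config":   digest.Canonical.FromString("hypothetical config bytes"),
		},
	}
	if err := img.recomputeDigests(); err != nil {
		return nil, err
	}
	// img.Digests now holds only the "manifest" digest, and img.Digest was
	// set to that same value because it was previously unset.
	return img.Digests, nil
}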

// load reloads the contents of the store from disk.
//
// Most callers should call reloadIfChanged() instead, to avoid overhead and to correctly
// manage r.lastWrite.
//
// The caller must hold r.lockfile for reading _or_ writing; lockedForWriting is true
// if it is held for writing.
// The caller must hold r.inProcessLock for WRITING.
//
// If !lockedForWriting and this function fails, the return value indicates whether
// retrying with lockedForWriting could succeed.
func (r *imageStore) load(lockedForWriting bool) (bool, error) {
	rpath := r.imagespath()
	data, err := os.ReadFile(rpath)
	if err != nil && !os.IsNotExist(err) {
		return false, err
	}

	images := []*Image{}
	if len(data) != 0 {
		if err := json.Unmarshal(data, &images); err != nil {
			return false, fmt.Errorf("loading %q: %w", rpath, err)
		}
	}
	idlist := make([]string, 0, len(images))
	ids := make(map[string]*Image)
	names := make(map[string]*Image)
	digests := make(map[digest.Digest][]*Image)
	var errorToResolveBySaving error // == nil
	for n, image := range images {
		ids[image.ID] = images[n]
		idlist = append(idlist, image.ID)
		for _, name := range image.Names {
			if conflict, ok := names[name]; ok {
				r.removeName(conflict, name)
				errorToResolveBySaving = ErrDuplicateImageNames
			}
		}
		// Compute the digest list.
		if err := image.recomputeDigests(); err != nil {
			return false, fmt.Errorf("computing digests for image with ID %q (%v): %w", image.ID, image.Names, err)
		}
		for _, name := range image.Names {
			names[name] = image
		}
		for _, digest := range image.Digests {
			list := digests[digest]
			digests[digest] = append(list, image)
		}
		image.ReadOnly = !r.lockfile.IsReadWrite()
	}

	if errorToResolveBySaving != nil {
		if !r.lockfile.IsReadWrite() {
			return false, errorToResolveBySaving
		}
		if !lockedForWriting {
			return true, errorToResolveBySaving
		}
	}
	r.images = images
	r.idindex = truncindex.NewTruncIndex(idlist) // Invalid values in idlist are ignored: they are not a reason to refuse processing the whole store.
	r.byid = ids
	r.byname = names
	r.bydigest = digests
	if errorToResolveBySaving != nil {
		return false, r.Save()
	}
	return false, nil
}

// Save saves the contents of the store to disk.
// The caller must hold r.lockfile locked for writing.
// The caller must hold r.inProcessLock for reading (but usually holds it for writing in order to make the desired changes).
func (r *imageStore) Save() error {
	if !r.lockfile.IsReadWrite() {
		return fmt.Errorf("not allowed to modify the image store at %q: %w", r.imagespath(), ErrStoreIsReadOnly)
	}
	r.lockfile.AssertLockedForWriting()
	rpath := r.imagespath()
	if err := os.MkdirAll(filepath.Dir(rpath), 0o700); err != nil {
		return err
	}
	jdata, err := json.Marshal(&r.images)
	if err != nil {
		return err
	}
	// This must be done before we write the file, because the process could be terminated
	// after the file is written but before the lock file is updated.
	lw, err := r.lockfile.RecordWrite()
	if err != nil {
		return err
	}
	r.lastWrite = lw
	if err := ioutils.AtomicWriteFile(rpath, jdata, 0o600); err != nil {
		return err
	}
	return nil
}

func newImageStore(dir string) (rwImageStore, error) {
	if err := os.MkdirAll(dir, 0o700); err != nil {
		return nil, err
	}
	lockfile, err := lockfile.GetLockFile(filepath.Join(dir, "images.lock"))
	if err != nil {
		return nil, err
	}
	istore := imageStore{
		lockfile: lockfile,
		dir:      dir,

		images:   []*Image{},
		byid:     make(map[string]*Image),
		byname:   make(map[string]*Image),
		bydigest: make(map[digest.Digest][]*Image),
	}
	if err := istore.startWritingWithReload(false); err != nil {
		return nil, err
	}
	defer istore.stopWriting()
	istore.lastWrite, err = istore.lockfile.GetLastWrite()
	if err != nil {
		return nil, err
	}
	if _, err := istore.load(true); err != nil {
		return nil, err
	}
	return &istore, nil
}

func newROImageStore(dir string) (roImageStore, error) {
	lockfile, err := lockfile.GetROLockFile(filepath.Join(dir, "images.lock"))
	if err != nil {
		return nil, err
	}
	istore := imageStore{
		lockfile: lockfile,
		dir:      dir,

		images:   []*Image{},
		byid:     make(map[string]*Image),
		byname:   make(map[string]*Image),
		bydigest: make(map[digest.Digest][]*Image),
	}
	if err := istore.startReadingWithReload(false); err != nil {
		return nil, err
	}
	defer istore.stopReading()
	istore.lastWrite, err = istore.lockfile.GetLastWrite()
	if err != nil {
		return nil, err
	}
	if _, err := istore.load(false); err != nil {
		return nil, err
	}
	return &istore, nil
}

// Requires startReading or startWriting.
func (r *imageStore) lookup(id string) (*Image, bool) {
	if image, ok := r.byid[id]; ok {
		return image, ok
	} else if image, ok := r.byname[id]; ok {
		return image, ok
	} else if longid, err := r.idindex.Get(id); err == nil {
		image, ok := r.byid[longid]
		return image, ok
	}
	return nil, false
}
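
// For reference: lookup resolves its argument as an exact image ID first, then
// as an exact name, and finally as a unique truncated-ID prefix via the
// truncindex, in that order.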

// Requires startWriting.
func (r *imageStore) ClearFlag(id string, flag string) error {
	if !r.lockfile.IsReadWrite() {
		return fmt.Errorf("not allowed to clear flags on images at %q: %w", r.imagespath(), ErrStoreIsReadOnly)
	}
	image, ok := r.lookup(id)
	if !ok {
		return fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
	}
	delete(image.Flags, flag)
	return r.Save()
}

// Requires startWriting.
func (r *imageStore) SetFlag(id string, flag string, value any) error {
	if !r.lockfile.IsReadWrite() {
		return fmt.Errorf("not allowed to set flags on images at %q: %w", r.imagespath(), ErrStoreIsReadOnly)
	}
	image, ok := r.lookup(id)
	if !ok {
		return fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
	}
	if image.Flags == nil {
		image.Flags = make(map[string]any)
	}
	image.Flags[flag] = value
	return r.Save()
}

// Requires startWriting.
func (r *imageStore) create(id string, names []string, layer string, options ImageOptions) (image *Image, err error) {
	if !r.lockfile.IsReadWrite() {
		return nil, fmt.Errorf("not allowed to create new images at %q: %w", r.imagespath(), ErrStoreIsReadOnly)
	}
	if id == "" {
		id = stringid.GenerateRandomID()
		_, idInUse := r.byid[id]
		for idInUse {
			id = stringid.GenerateRandomID()
			_, idInUse = r.byid[id]
		}
	}
	if _, idInUse := r.byid[id]; idInUse {
		return nil, fmt.Errorf("an image with ID %q already exists: %w", id, ErrDuplicateID)
	}
	names = dedupeStrings(names)
	for _, name := range names {
		if image, nameInUse := r.byname[name]; nameInUse {
			return nil, fmt.Errorf("image name %q is already associated with image %q: %w", name, image.ID, ErrDuplicateName)
		}
	}
	image = &Image{
		ID:             id,
		Digest:         options.Digest,
		Digests:        dedupeDigests(options.Digests),
		Names:          names,
		NamesHistory:   copySlicePreferringNil(options.NamesHistory),
		TopLayer:       layer,
		Metadata:       options.Metadata,
		BigDataNames:   []string{},
		BigDataSizes:   make(map[string]int64),
		BigDataDigests: make(map[string]digest.Digest),
		Created:        options.CreationDate,
		Flags:          newMapFrom(options.Flags),
	}
	if image.Created.IsZero() {
		image.Created = time.Now().UTC()
	}
	err = image.recomputeDigests()
	if err != nil {
		return nil, fmt.Errorf("validating digests for new image: %w", err)
	}
	r.images = append(r.images, image)
	// This can only fail on duplicate IDs, which shouldn’t happen — and in
	// that case the index is already in the desired state anyway.
	// Implementing recovery from an unlikely and unimportant failure here
	// would be too risky.
	_ = r.idindex.Add(id)
	r.byid[id] = image
	for _, name := range names {
		r.byname[name] = image
	}
	for _, digest := range image.Digests {
		list := r.bydigest[digest]
		r.bydigest[digest] = append(list, image)
	}
	defer func() {
		if err != nil {
			// now that the in-memory structures know about the new
			// record, we can use regular Delete() to clean up if
			// anything breaks from here on out
			if e := r.Delete(id); e != nil {
				logrus.Debugf("while cleaning up partially-created image %q we failed to create: %v", id, e)
			}
		}
	}()
	err = r.Save()
	if err != nil {
		return nil, err
	}
	for _, item := range options.BigData {
		if item.Digest == "" {
			item.Digest = digest.Canonical.FromBytes(item.Data)
		}
		if err = r.setBigData(image, item.Key, item.Data, item.Digest); err != nil {
			return nil, err
		}
	}
	image = copyImage(image)
	return image, err
}

// Requires startWriting.
func (r *imageStore) addMappedTopLayer(id, layer string) error {
	if image, ok := r.lookup(id); ok {
		image.MappedTopLayers = append(image.MappedTopLayers, layer)
		return r.Save()
	}
	return fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
}

// Requires startWriting.
func (r *imageStore) removeMappedTopLayer(id, layer string) error {
	if image, ok := r.lookup(id); ok {
		initialLen := len(image.MappedTopLayers)
		image.MappedTopLayers = stringutils.RemoveFromSlice(image.MappedTopLayers, layer)
		// No layer was removed. No need to save.
		if initialLen == len(image.MappedTopLayers) {
			return nil
		}
		return r.Save()
	}
	return fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
}

// Requires startReading or startWriting.
func (r *imageStore) Metadata(id string) (string, error) {
	if image, ok := r.lookup(id); ok {
		return image.Metadata, nil
	}
	return "", fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
}

// Requires startWriting.
func (r *imageStore) SetMetadata(id, metadata string) error {
	if !r.lockfile.IsReadWrite() {
		return fmt.Errorf("not allowed to modify image metadata at %q: %w", r.imagespath(), ErrStoreIsReadOnly)
	}
	if image, ok := r.lookup(id); ok {
		image.Metadata = metadata
		return r.Save()
	}
	return fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
}

// The caller must hold r.inProcessLock for writing.
func (r *imageStore) removeName(image *Image, name string) {
	image.Names = stringSliceWithoutValue(image.Names, name)
}

// The caller must hold r.inProcessLock for writing.
func (i *Image) addNameToHistory(name string) {
	i.NamesHistory = dedupeStrings(append([]string{name}, i.NamesHistory...))
}

// Requires startWriting.
func (r *imageStore) updateNames(id string, names []string, op updateNameOperation) error {
	if !r.lockfile.IsReadWrite() {
		return fmt.Errorf("not allowed to change image name assignments at %q: %w", r.imagespath(), ErrStoreIsReadOnly)
	}
	image, ok := r.lookup(id)
	if !ok {
		return fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
	}
	oldNames := image.Names
	names, err := applyNameOperation(oldNames, names, op)
	if err != nil {
		return err
	}
	for _, name := range oldNames {
		delete(r.byname, name)
	}
	for _, name := range names {
		if otherImage, ok := r.byname[name]; ok {
			r.removeName(otherImage, name)
		}
		r.byname[name] = image
		image.addNameToHistory(name)
	}
	image.Names = names
	return r.Save()
}

// Requires startWriting.
func (r *imageStore) Delete(id string) error {
	if !r.lockfile.IsReadWrite() {
		return fmt.Errorf("not allowed to delete images at %q: %w", r.imagespath(), ErrStoreIsReadOnly)
	}
	image, ok := r.lookup(id)
	if !ok {
		return fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
	}
	id = image.ID
	delete(r.byid, id)
	// This can only fail if the ID is already missing, which shouldn’t happen — and in that case the index is already in the desired state anyway.
	// The store’s Delete method is used on various paths to recover from failures, so this should be robust against partially missing data.
	_ = r.idindex.Delete(id)
	for _, name := range image.Names {
		delete(r.byname, name)
	}
	for _, digest := range image.Digests {
		prunedList := slices.DeleteFunc(r.bydigest[digest], func(i *Image) bool {
			return i == image
		})
		if len(prunedList) == 0 {
			delete(r.bydigest, digest)
		} else {
			r.bydigest[digest] = prunedList
		}
	}
	r.images = slices.DeleteFunc(r.images, func(candidate *Image) bool {
		return candidate.ID == id
	})
	if err := r.Save(); err != nil {
		return err
	}
	if err := os.RemoveAll(r.datadir(id)); err != nil {
		return err
	}
	return nil
}

// Requires startReading or startWriting.
func (r *imageStore) Get(id string) (*Image, error) {
	if image, ok := r.lookup(id); ok {
		return copyImage(image), nil
	}
	return nil, fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
}

// Requires startReading or startWriting.
func (r *imageStore) Exists(id string) bool {
	_, ok := r.lookup(id)
	return ok
}

// Requires startReading or startWriting.
func (r *imageStore) ByDigest(d digest.Digest) ([]*Image, error) {
	if images, ok := r.bydigest[d]; ok {
		return copyImageSlice(images), nil
	}
	return nil, fmt.Errorf("locating image with digest %q: %w", d, ErrImageUnknown)
}

// Requires startReading or startWriting.
func (r *imageStore) BigData(id, key string) ([]byte, error) {
	if key == "" {
		return nil, fmt.Errorf("can't retrieve image big data value for empty name: %w", ErrInvalidBigDataName)
	}
	image, ok := r.lookup(id)
	if !ok {
		return nil, fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
	}
	return os.ReadFile(r.datapath(image.ID, key))
}

// Requires startReading or startWriting.
func (r *imageStore) BigDataSize(id, key string) (int64, error) {
	if key == "" {
		return -1, fmt.Errorf("can't retrieve size of image big data with empty name: %w", ErrInvalidBigDataName)
	}
	image, ok := r.lookup(id)
	if !ok {
		return -1, fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
	}
	if size, ok := image.BigDataSizes[key]; ok { // This is valid, and returns ok == false, for BigDataSizes == nil.
		return size, nil
	}
	if data, err := r.BigData(id, key); err == nil && data != nil {
		return int64(len(data)), nil
	}
	return -1, ErrSizeUnknown
}

// Requires startReading or startWriting.
func (r *imageStore) BigDataDigest(id, key string) (digest.Digest, error) {
	if key == "" {
		return "", fmt.Errorf("can't retrieve digest of image big data value with empty name: %w", ErrInvalidBigDataName)
	}
	image, ok := r.lookup(id)
	if !ok {
		return "", fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
	}
	if d, ok := image.BigDataDigests[key]; ok { // This is valid, and returns ok == false, for BigDataDigests == nil.
		return d, nil
	}
	return "", ErrDigestUnknown
}

// Requires startReading or startWriting.
func (r *imageStore) BigDataNames(id string) ([]string, error) {
	image, ok := r.lookup(id)
	if !ok {
		return nil, fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
	}
	return copySlicePreferringNil(image.BigDataNames), nil
}

// Requires startWriting.
func (r *imageStore) SetBigData(id, key string, data []byte, digestManifest func([]byte) (digest.Digest, error)) error {
	if !r.lockfile.IsReadWrite() {
		return fmt.Errorf("not allowed to save data items associated with images at %q: %w", r.imagespath(), ErrStoreIsReadOnly)
	}
	image, ok := r.lookup(id)
	if !ok {
		return fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
	}
	var err error
	var newDigest digest.Digest
	if bigDataNameIsManifest(key) {
		if digestManifest == nil {
			return fmt.Errorf("digesting manifest: no manifest digest callback provided: %w", ErrDigestUnknown)
		}
		if newDigest, err = digestManifest(data); err != nil {
			return fmt.Errorf("digesting manifest: %w", err)
		}
	} else {
		newDigest = digest.Canonical.FromBytes(data)
	}
	return r.setBigData(image, key, data, newDigest)
}
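
// An illustrative sketch of calling SetBigData for a manifest-named item,
// assuming in-package access. The helper name is hypothetical, and plain
// digest.Canonical.FromBytes stands in for a format-aware manifest digester.
func exampleStoreManifest(store rwImageStore, id string, manifest []byte) error {
	if err := store.startWriting(); err != nil {
		return err
	}
	defer store.stopWriting()
	// Manifest-named keys require a digest callback; other keys do not.
	return store.SetBigData(id, ImageDigestManifestBigDataNamePrefix, manifest,
		func(b []byte) (digest.Digest, error) {
			return digest.Canonical.FromBytes(b), nil
		})
}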

// Requires startWriting.
func (r *imageStore) setBigData(image *Image, key string, data []byte, newDigest digest.Digest) error {
	if key == "" {
		return fmt.Errorf("can't set empty name for image big data item: %w", ErrInvalidBigDataName)
	}
	err := os.MkdirAll(r.datadir(image.ID), 0o700)
	if err != nil {
		return err
	}
	err = ioutils.AtomicWriteFile(r.datapath(image.ID, key), data, 0o600)
	if err == nil {
		save := false
		if image.BigDataSizes == nil {
			image.BigDataSizes = make(map[string]int64)
		}
		oldSize, sizeOk := image.BigDataSizes[key]
		image.BigDataSizes[key] = int64(len(data))
		if image.BigDataDigests == nil {
			image.BigDataDigests = make(map[string]digest.Digest)
		}
		oldDigest, digestOk := image.BigDataDigests[key]
		image.BigDataDigests[key] = newDigest
		if !sizeOk || oldSize != image.BigDataSizes[key] || !digestOk || oldDigest != newDigest {
			save = true
		}
		if !slices.Contains(image.BigDataNames, key) {
			image.BigDataNames = append(image.BigDataNames, key)
			save = true
		}
		for _, oldDigest := range image.Digests {
			// remove the image from the list of images in the digest-based index
			if list, ok := r.bydigest[oldDigest]; ok {
				prunedList := slices.DeleteFunc(list, func(i *Image) bool {
					return i == image
				})
				if len(prunedList) == 0 {
					delete(r.bydigest, oldDigest)
				} else {
					r.bydigest[oldDigest] = prunedList
				}
			}
		}
		if err = image.recomputeDigests(); err != nil {
			return fmt.Errorf("recomputing image digest information for %s: %w", image.ID, err)
		}
		for _, newDigest := range image.Digests {
			// add the image to the list of images in the digest-based index which
			// corresponds to the new digest for this item, unless it's already there
			list := r.bydigest[newDigest]
			if !slices.Contains(list, image) {
				r.bydigest[newDigest] = append(list, image)
			}
		}
		if save {
			err = r.Save()
		}
	}
	return err
}

// Requires startWriting.
func (r *imageStore) Wipe() error {
	if !r.lockfile.IsReadWrite() {
		return fmt.Errorf("not allowed to delete images at %q: %w", r.imagespath(), ErrStoreIsReadOnly)
	}
	ids := make([]string, 0, len(r.byid))
	for id := range r.byid {
		ids = append(ids, id)
	}
	for _, id := range ids {
		if err := r.Delete(id); err != nil {
			return err
		}
	}
	return nil
}