Update module github.com/containers/storage to v1.47.0

Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Author:    renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Committed: 2023-06-27 00:12:20 +00:00 (committed by GitHub)
Parent:    32da299965
Commit:    d72d53cc51

124 changed files with 3194 additions and 1358 deletions

@@ -314,9 +314,6 @@ type rwLayerStore interface {
// Clean up unreferenced layers
GarbageCollect() error
-	// supportsShifting() returns true if the driver.Driver.SupportsShifting().
-	supportsShifting() bool
}
type layerStore struct {
@@ -657,7 +654,6 @@ func (r *layerStore) Layers() ([]Layer, error) {
// Requires startWriting.
func (r *layerStore) GarbageCollect() error {
layers, err := r.driver.ListLayers()
if err != nil {
if errors.Is(err, drivers.ErrNotSupported) {
return nil
@@ -864,33 +860,35 @@ func (r *layerStore) loadMounts() error {
	return err
}
layerMounts := []layerMountPoint{}
-	if err = json.Unmarshal(data, &layerMounts); len(data) == 0 || err == nil {
-		// Clear all of our mount information. If another process
-		// unmounted something, it (along with its zero count) won't
-		// have been encoded into the version of mountpoints.json that
-		// we're loading, so our count could fall out of sync with it
-		// if we don't, and if we subsequently change something else,
-		// we'd pass that error along to other process that reloaded
-		// the data after we saved it.
-		for _, layer := range r.layers {
-			layer.MountPoint = ""
-			layer.MountCount = 0
-		}
-		// All of the non-zero count values will have been encoded, so
-		// we reset the still-mounted ones based on the contents.
-		for _, mount := range layerMounts {
-			if mount.MountPoint != "" {
-				if layer, ok := r.lookup(mount.ID); ok {
-					mounts[mount.MountPoint] = layer
-					layer.MountPoint = mount.MountPoint
-					layer.MountCount = mount.MountCount
-				}
-			}
-		}
-		err = nil
-	}
+	if len(data) != 0 {
+		if err := json.Unmarshal(data, &layerMounts); err != nil {
+			return err
+		}
+	}
+	// Clear all of our mount information. If another process
+	// unmounted something, it (along with its zero count) won't
+	// have been encoded into the version of mountpoints.json that
+	// we're loading, so our count could fall out of sync with it
+	// if we don't, and if we subsequently change something else,
+	// we'd pass that error along to other process that reloaded
+	// the data after we saved it.
+	for _, layer := range r.layers {
+		layer.MountPoint = ""
+		layer.MountCount = 0
+	}
+	// All of the non-zero count values will have been encoded, so
+	// we reset the still-mounted ones based on the contents.
+	for _, mount := range layerMounts {
+		if mount.MountPoint != "" {
+			if layer, ok := r.lookup(mount.ID); ok {
+				mounts[mount.MountPoint] = layer
+				layer.MountPoint = mount.MountPoint
+				layer.MountCount = mount.MountCount
+			}
+		}
+	}
r.bymount = mounts
-	return err
+	return nil
}
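
Note: the rewritten loadMounts separates the two concerns the old combined if condition was juggling. An empty mountpoints.json is valid (no mounts), while malformed JSON now fails fast instead of being carried to the end of the function, and the in-memory reset happens unconditionally. A minimal sketch of the resulting shape, with hypothetical names rather than the library's own types:

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"
)

type mountPoint struct {
	ID         string `json:"id"`
	MountPoint string `json:"path"`
	MountCount int    `json:"count"`
}

// loadState mirrors the shape of the new loadMounts: an empty or missing
// file is valid (no mounts), a malformed file is a hard error, and the
// in-memory index is rebuilt from scratch rather than patched in place.
func loadState(path string) (map[string]mountPoint, error) {
	data, err := os.ReadFile(path)
	if err != nil && !os.IsNotExist(err) {
		return nil, err
	}
	var mounts []mountPoint
	if len(data) != 0 {
		if err := json.Unmarshal(data, &mounts); err != nil {
			return nil, err // fail fast instead of deferring the error
		}
	}
	byPath := make(map[string]mountPoint) // fresh index on every load
	for _, m := range mounts {
		if m.MountPoint != "" {
			byPath[m.MountPoint] = m
		}
	}
	return byPath, nil
}

func main() {
	state, err := loadState("mountpoints.json")
	fmt.Println(state, err)
}
```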
// save saves the contents of the store to disk.
@@ -920,13 +918,21 @@ func (r *layerStore) saveLayers(saveLocations layerLocations) error {
}
r.lockfile.AssertLockedForWriting()
+	// This must be done before we write the file, because the process could be terminated
+	// after the file is written but before the lock file is updated.
+	lw, err := r.lockfile.RecordWrite()
+	if err != nil {
+		return err
+	}
+	r.lastWrite = lw
for locationIndex := 0; locationIndex < numLayerLocationIndex; locationIndex++ {
location := layerLocationFromIndex(locationIndex)
if location&saveLocations == 0 {
continue
}
rpath := r.jsonPath[locationIndex]
-		if err := os.MkdirAll(filepath.Dir(rpath), 0700); err != nil {
+		if err := os.MkdirAll(filepath.Dir(rpath), 0o700); err != nil {
return err
}
subsetLayers := make([]*Layer, 0, len(r.layers))
@@ -944,16 +950,11 @@ func (r *layerStore) saveLayers(saveLocations layerLocations) error {
if location == volatileLayerLocation {
opts.NoSync = true
}
-		if err := ioutils.AtomicWriteFileWithOpts(rpath, jldata, 0600, &opts); err != nil {
+		if err := ioutils.AtomicWriteFileWithOpts(rpath, jldata, 0o600, &opts); err != nil {
return err
}
r.layerspathsModified[locationIndex] = opts.ModTime
}
-	lw, err := r.lockfile.RecordWrite()
-	if err != nil {
-		return err
-	}
-	r.lastWrite = lw
return nil
}
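
Note: moving RecordWrite ahead of the file writes changes the failure mode if the process dies between the two steps. Readers may now reload an unchanged file (wasted work) instead of missing a real change (stale state). A sketch of that ordering argument, with a hypothetical changeLog standing in for the store's lock file:

```go
package main

import (
	"fmt"
	"os"
	"sync/atomic"
)

// changeLog stands in for the lock file: a counter that readers compare
// against a remembered value to decide whether to reload. (Hypothetical
// type; the real code uses lockfile.RecordWrite.)
type changeLog struct{ generation atomic.Int64 }

func (c *changeLog) RecordWrite() int64 { return c.generation.Add(1) }

// save records the write *before* publishing the data. If the process
// dies between the two steps, readers see a bumped generation and reload
// an unchanged file -- wasted work, but never stale state. The opposite
// order risks readers never noticing the new file at all.
func save(c *changeLog, path string, data []byte) (int64, error) {
	gen := c.RecordWrite()                      // step 1: bump the change token
	return gen, os.WriteFile(path, data, 0o600) // step 2: publish the data
}

func main() {
	var c changeLog
	gen, err := save(&c, "/tmp/layers.json", []byte(`[]`))
	fmt.Println(gen, err)
}
```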
@@ -965,7 +966,7 @@ func (r *layerStore) saveMounts() error {
}
r.mountsLockfile.AssertLockedForWriting()
mpath := r.mountspath()
-	if err := os.MkdirAll(filepath.Dir(mpath), 0700); err != nil {
+	if err := os.MkdirAll(filepath.Dir(mpath), 0o700); err != nil {
return err
}
mounts := make([]layerMountPoint, 0, len(r.layers))
@@ -982,22 +983,26 @@ func (r *layerStore) saveMounts() error {
if err != nil {
return err
}
-	if err = ioutils.AtomicWriteFile(mpath, jmdata, 0600); err != nil {
-		return err
-	}
+	// This must be done before we write the file, because the process could be terminated
+	// after the file is written but before the lock file is updated.
	lw, err := r.mountsLockfile.RecordWrite()
	if err != nil {
		return err
	}
	r.mountsLastWrite = lw
+	if err = ioutils.AtomicWriteFile(mpath, jmdata, 0o600); err != nil {
+		return err
+	}
return r.loadMounts()
}
func (s *store) newLayerStore(rundir string, layerdir string, driver drivers.Driver, transient bool) (rwLayerStore, error) {
-	if err := os.MkdirAll(rundir, 0700); err != nil {
+	if err := os.MkdirAll(rundir, 0o700); err != nil {
return nil, err
}
-	if err := os.MkdirAll(layerdir, 0700); err != nil {
+	if err := os.MkdirAll(layerdir, 0o700); err != nil {
return nil, err
}
// Note: While the containers.lock file is in rundir for transient stores
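
Note: the recurring 0700 → 0o700 and 0600 → 0o600 churn throughout this diff is purely notational. Go 1.13 added the 0o prefix for octal literals, and both spellings denote the same constant; the 0o form just makes the base unmistakable. A quick check:

```go
package main

import "fmt"

func main() {
	// 0700 (legacy octal) and 0o700 (Go 1.13+ explicit octal) are the
	// same untyped constant; only the spelling differs.
	fmt.Println(0700 == 0o700)         // true
	fmt.Printf("%o %o\n", 0700, 0o700) // 700 700
}
```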
@@ -1213,10 +1218,10 @@ func (r *layerStore) create(id string, parentLayer *Layer, names []string, mount
if !r.lockfile.IsReadWrite() {
return nil, -1, fmt.Errorf("not allowed to create new layers at %q: %w", r.layerdir, ErrStoreIsReadOnly)
}
-	if err := os.MkdirAll(r.rundir, 0700); err != nil {
+	if err := os.MkdirAll(r.rundir, 0o700); err != nil {
return nil, -1, err
}
-	if err := os.MkdirAll(r.layerdir, 0700); err != nil {
+	if err := os.MkdirAll(r.layerdir, 0o700); err != nil {
return nil, -1, err
}
if id == "" {
@@ -1690,7 +1695,7 @@ func (r *layerStore) setBigData(layer *Layer, key string, data io.Reader) error
if key == "" {
return fmt.Errorf("can't set empty name for layer big data item: %w", ErrInvalidBigDataName)
}
-	err := os.MkdirAll(r.datadir(layer.ID), 0700)
+	err := os.MkdirAll(r.datadir(layer.ID), 0o700)
if err != nil {
return err
}
@@ -1698,7 +1703,7 @@ func (r *layerStore) setBigData(layer *Layer, key string, data io.Reader) error
// NewAtomicFileWriter doesn't overwrite/truncate the existing inode.
// BigData() relies on this behaviour when opening the file for read
// so that it is either accessing the old data or the new one.
-	writer, err := ioutils.NewAtomicFileWriter(r.datapath(layer.ID, key), 0600)
+	writer, err := ioutils.NewAtomicFileWriter(r.datapath(layer.ID, key), 0o600)
if err != nil {
return fmt.Errorf("opening bigdata file: %w", err)
}
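
Note: the comment above NewAtomicFileWriter states the key invariant: because the writer replaces the target via rename rather than truncating the existing inode, a concurrent BigData() reader holds either the old file or the new one, never a torn mix. A minimal sketch of that write-temp-then-rename pattern, using plain os calls (the real implementation lives in ioutils):

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// atomicWrite writes to a fresh temp file in the target's directory and
// renames it over the target. rename(2) swaps the directory entry, so a
// reader that already opened the old inode keeps reading the old bytes;
// it never observes a half-written file.
func atomicWrite(path string, data []byte, perm os.FileMode) error {
	tmp, err := os.CreateTemp(filepath.Dir(path), ".tmp-*")
	if err != nil {
		return err
	}
	defer os.Remove(tmp.Name()) // fails harmlessly after a successful rename
	if _, err := tmp.Write(data); err != nil {
		tmp.Close()
		return err
	}
	if err := tmp.Chmod(perm); err != nil {
		tmp.Close()
		return err
	}
	if err := tmp.Close(); err != nil {
		return err
	}
	return os.Rename(tmp.Name(), path)
}

func main() {
	fmt.Println(atomicWrite("/tmp/bigdata", []byte("payload"), 0o600))
}
```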
@@ -1922,6 +1927,18 @@ func (r *layerStore) Wipe() error {
return err
}
}
+	ids, err := r.driver.ListLayers()
+	if err != nil {
+		if !errors.Is(err, drivers.ErrNotSupported) {
+			return err
+		}
+		ids = nil
+	}
+	for _, id := range ids {
+		if err := r.driver.Remove(id); err != nil {
+			return err
+		}
+	}
return nil
}
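
Note: Wipe() now also asks the driver to enumerate and remove the layers it knows about, so layers the store has no record of still get cleaned up; drivers that cannot enumerate report drivers.ErrNotSupported and the sweep is skipped. A sketch of that sentinel-error capability probe, with hypothetical stand-ins for the driver types:

```go
package main

import (
	"errors"
	"fmt"
)

// errNotSupported mirrors drivers.ErrNotSupported: a sentinel that lets a
// capability be probed at call time instead of via a separate interface.
var errNotSupported = errors.New("not supported")

type driver interface{ ListLayers() ([]string, error) }

type basicDriver struct{}

func (basicDriver) ListLayers() ([]string, error) { return nil, errNotSupported }

// wipeOrphans shows the shape of the new Wipe logic: treat "can't
// enumerate" as "nothing extra to remove", but propagate real failures.
func wipeOrphans(d driver, remove func(string) error) error {
	ids, err := d.ListLayers()
	if err != nil {
		if !errors.Is(err, errNotSupported) {
			return err
		}
		ids = nil // graceful degradation: skip the sweep
	}
	for _, id := range ids {
		if err := remove(id); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	err := wipeOrphans(basicDriver{}, func(id string) error { return nil })
	fmt.Println(err)
}
```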
@@ -2198,7 +2215,7 @@ func (r *layerStore) applyDiffWithOptions(to string, layerOptions *LayerOptions,
return -1, err
}
compression := archive.DetectCompression(header[:n])
-	defragmented := io.MultiReader(bytes.NewBuffer(header[:n]), diff)
+	defragmented := io.MultiReader(bytes.NewReader(header[:n]), diff)
// Decide if we need to compute digests
var compressedDigest, uncompressedDigest digest.Digest // = ""
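
Note: swapping bytes.NewBuffer for bytes.NewReader when stitching the sniffed header back onto the stream is a small fix of intent: a bytes.Reader is a purpose-built read-only view (and adds io.Seeker/io.ReaderAt), whereas a bytes.Buffer drains itself as it is read and carries write machinery that is never used here. For example:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
)

func main() {
	header := []byte("\x1f\x8b") // e.g. a sniffed gzip magic prefix
	// bytes.NewReader is the right wrapper for a read-only prefix that
	// must be replayed ahead of the remaining stream.
	r := io.MultiReader(bytes.NewReader(header), bytes.NewBufferString("rest"))
	data, _ := io.ReadAll(r)
	fmt.Printf("%q\n", data)
}
```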
@@ -2226,54 +2243,63 @@ func (r *layerStore) applyDiffWithOptions(to string, layerOptions *LayerOptions,
	defragmented = io.TeeReader(defragmented, compressedCounter)
	tsdata := bytes.Buffer{}
-	compressor, err := pgzip.NewWriterLevel(&tsdata, pgzip.BestSpeed)
-	if err != nil {
-		compressor = pgzip.NewWriter(&tsdata)
-	}
-	if err := compressor.SetConcurrency(1024*1024, 1); err != nil { // 1024*1024 is the hard-coded default; we're not changing that
-		logrus.Infof("setting compression concurrency threads to 1: %v; ignoring", err)
-	}
-	metadata := storage.NewJSONPacker(compressor)
-	uncompressed, err := archive.DecompressStream(defragmented)
-	if err != nil {
-		return -1, err
-	}
-	defer uncompressed.Close()
	uidLog := make(map[uint32]struct{})
	gidLog := make(map[uint32]struct{})
-	idLogger, err := tarlog.NewLogger(func(h *tar.Header) {
-		if !strings.HasPrefix(path.Base(h.Name), archive.WhiteoutPrefix) {
-			uidLog[uint32(h.Uid)] = struct{}{}
-			gidLog[uint32(h.Gid)] = struct{}{}
-		}
-	})
-	if err != nil {
-		return -1, err
-	}
-	defer idLogger.Close()
-	uncompressedCounter := ioutils.NewWriteCounter(idLogger)
-	uncompressedWriter := (io.Writer)(uncompressedCounter)
-	if uncompressedDigester != nil {
-		uncompressedWriter = io.MultiWriter(uncompressedWriter, uncompressedDigester.Hash())
-	}
-	payload, err := asm.NewInputTarStream(io.TeeReader(uncompressed, uncompressedWriter), metadata, storage.NewDiscardFilePutter())
-	if err != nil {
-		return -1, err
-	}
-	options := drivers.ApplyDiffOpts{
-		Diff:       payload,
-		Mappings:   r.layerMappings(layer),
-		MountLabel: layer.MountLabel,
-	}
-	size, err = r.driver.ApplyDiff(layer.ID, layer.Parent, options)
-	if err != nil {
-		return -1, err
-	}
-	compressor.Close()
+	var uncompressedCounter *ioutils.WriteCounter
+	size, err = func() (int64, error) { // A scope for defer
+		compressor, err := pgzip.NewWriterLevel(&tsdata, pgzip.BestSpeed)
+		if err != nil {
+			return -1, err
+		}
+		defer compressor.Close() // This must happen before tsdata is consumed.
+		if err := compressor.SetConcurrency(1024*1024, 1); err != nil { // 1024*1024 is the hard-coded default; we're not changing that
+			logrus.Infof("setting compression concurrency threads to 1: %v; ignoring", err)
+		}
+		metadata := storage.NewJSONPacker(compressor)
+		uncompressed, err := archive.DecompressStream(defragmented)
+		if err != nil {
+			return -1, err
+		}
+		defer uncompressed.Close()
+		idLogger, err := tarlog.NewLogger(func(h *tar.Header) {
+			if !strings.HasPrefix(path.Base(h.Name), archive.WhiteoutPrefix) {
+				uidLog[uint32(h.Uid)] = struct{}{}
+				gidLog[uint32(h.Gid)] = struct{}{}
+			}
+		})
+		if err != nil {
+			return -1, err
+		}
+		defer idLogger.Close() // This must happen before uidLog and gidLog is consumed.
+		uncompressedCounter = ioutils.NewWriteCounter(idLogger)
+		uncompressedWriter := (io.Writer)(uncompressedCounter)
+		if uncompressedDigester != nil {
+			uncompressedWriter = io.MultiWriter(uncompressedWriter, uncompressedDigester.Hash())
+		}
+		payload, err := asm.NewInputTarStream(io.TeeReader(uncompressed, uncompressedWriter), metadata, storage.NewDiscardFilePutter())
+		if err != nil {
+			return -1, err
+		}
+		options := drivers.ApplyDiffOpts{
+			Diff:       payload,
+			Mappings:   r.layerMappings(layer),
+			MountLabel: layer.MountLabel,
+		}
+		size, err := r.driver.ApplyDiff(layer.ID, layer.Parent, options)
+		if err != nil {
+			return -1, err
+		}
+		return size, err
+	}()
+	if err != nil {
+		return -1, err
+	}
-	if err := os.MkdirAll(filepath.Dir(r.tspath(layer.ID)), 0700); err != nil {
+	if err := os.MkdirAll(filepath.Dir(r.tspath(layer.ID)), 0o700); err != nil {
		return -1, err
	}
-	if err := ioutils.AtomicWriteFile(r.tspath(layer.ID), tsdata.Bytes(), 0600); err != nil {
+	if err := ioutils.AtomicWriteFile(r.tspath(layer.ID), tsdata.Bytes(), 0o600); err != nil {
		return -1, err
	}
if compressedDigester != nil {
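
Note: the large rewrite above wraps the streaming pipeline in an immediately invoked function literal so that compressor.Close() and idLogger.Close() run before tsdata, uidLog, and gidLog are consumed, while keeping early-return error handling. A minimal sketch of that "scope for defer" pattern, using the standard library's gzip in place of pgzip:

```go
package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
)

// process shows the pattern: the writer must be closed (flushing its
// footer) before buf.Bytes() is read, but defer only fires on function
// exit. An immediately invoked function literal gives the defer a
// narrower scope without giving up early returns.
func process(payload []byte) ([]byte, error) {
	var buf bytes.Buffer
	if err := func() error { // A scope for defer
		zw := gzip.NewWriter(&buf)
		defer zw.Close() // runs before buf is consumed below
		_, err := zw.Write(payload)
		return err
	}(); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil // safe: the writer is already closed
}

func main() {
	out, err := process([]byte("layer diff"))
	fmt.Println(len(out), err)
}
```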
@@ -2366,8 +2392,26 @@ func (r *layerStore) ApplyDiffFromStagingDirectory(id, stagingDirectory string,
layer.UncompressedDigest = diffOutput.UncompressedDigest
layer.UncompressedSize = diffOutput.Size
layer.Metadata = diffOutput.Metadata
-	if err = r.saveFor(layer); err != nil {
-		return err
+	if len(diffOutput.TarSplit) != 0 {
+		tsdata := bytes.Buffer{}
+		compressor, err := pgzip.NewWriterLevel(&tsdata, pgzip.BestSpeed)
+		if err != nil {
+			compressor = pgzip.NewWriter(&tsdata)
+		}
+		if err := compressor.SetConcurrency(1024*1024, 1); err != nil { // 1024*1024 is the hard-coded default; we're not changing that
+			logrus.Infof("setting compression concurrency threads to 1: %v; ignoring", err)
+		}
+		if _, err := compressor.Write(diffOutput.TarSplit); err != nil {
+			compressor.Close()
+			return err
+		}
+		compressor.Close()
+		if err := os.MkdirAll(filepath.Dir(r.tspath(layer.ID)), 0o700); err != nil {
+			return err
+		}
+		if err := ioutils.AtomicWriteFile(r.tspath(layer.ID), tsdata.Bytes(), 0o600); err != nil {
+			return err
+		}
}
for k, v := range diffOutput.BigData {
if err := r.SetBigData(id, k, bytes.NewReader(v)); err != nil {
@@ -2377,6 +2421,9 @@ func (r *layerStore) ApplyDiffFromStagingDirectory(id, stagingDirectory string,
return err
}
}
+	if err = r.saveFor(layer); err != nil {
+		return err
+	}
return err
}
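
Note: ApplyDiffFromStagingDirectory now persists the staging driver's tar-split data itself, pgzip-compressing it before the atomic write, and saveFor(layer) moves after the tar-split and big-data writes, presumably so the layer metadata is only recorded once the files it references exist. A sketch of the compression step, with os.WriteFile standing in for ioutils.AtomicWriteFile:

```go
package main

import (
	"bytes"
	"fmt"
	"os"

	"github.com/klauspost/pgzip"
)

// writeCompressed mirrors the new tar-split persistence step: compress a
// blob that is already in memory, then hand the result to the write.
func writeCompressed(path string, blob []byte) error {
	tsdata := bytes.Buffer{}
	compressor, err := pgzip.NewWriterLevel(&tsdata, pgzip.BestSpeed)
	if err != nil {
		return err
	}
	if _, err := compressor.Write(blob); err != nil {
		compressor.Close()
		return err
	}
	// Close before reading tsdata: it flushes the gzip footer.
	if err := compressor.Close(); err != nil {
		return err
	}
	return os.WriteFile(path, tsdata.Bytes(), 0o600)
}

func main() {
	fmt.Println(writeCompressed("/tmp/layer.tar-split.gz", []byte("tar-split json")))
}
```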
@@ -2443,10 +2490,6 @@ func (r *layerStore) LayersByUncompressedDigest(d digest.Digest) ([]Layer, error
return r.layersByDigestMap(r.byuncompressedsum, d)
}
-func (r *layerStore) supportsShifting() bool {
-	return r.driver.SupportsShifting()
-}
func closeAll(closes ...func() error) (rErr error) {
for _, f := range closes {
if err := f(); err != nil {