Mirror of https://github.com/containers/skopeo.git (synced 2025-09-25 20:29:24 +00:00)
Bump github.com/containers/storage from 1.41.0 to 1.42.0
Bumps [github.com/containers/storage](https://github.com/containers/storage) from 1.41.0 to 1.42.0.
- [Release notes](https://github.com/containers/storage/releases)
- [Changelog](https://github.com/containers/storage/blob/main/docs/containers-storage-changes.md)
- [Commits](https://github.com/containers/storage/compare/v1.41.0...v1.42.0)

---
updated-dependencies:
- dependency-name: github.com/containers/storage
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
vendor/github.com/containers/storage/pkg/chrootarchive/archive.go (generated, vendored): 9 lines changed

@@ -12,7 +12,6 @@ import (
 	"github.com/containers/storage/pkg/archive"
 	"github.com/containers/storage/pkg/idtools"
 	"github.com/opencontainers/runc/libcontainer/userns"
-	"github.com/pkg/errors"
 )
 
 // NewArchiver returns a new Archiver which uses chrootarchive.Untar
@@ -63,7 +62,7 @@ func UntarUncompressed(tarArchive io.Reader, dest string, options *archive.TarOp
 // Handler for teasing out the automatic decompression
 func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions, decompress bool, root string) error {
 	if tarArchive == nil {
-		return fmt.Errorf("Empty archive")
+		return fmt.Errorf("empty archive")
 	}
 	if options == nil {
 		options = &archive.TarOptions{}
@@ -115,7 +114,7 @@ func CopyFileWithTarAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap
 	archiver.Untar = func(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
 		contentReader, contentWriter, err := os.Pipe()
 		if err != nil {
-			return errors.Wrapf(err, "error creating pipe extract data to %q", dest)
+			return fmt.Errorf("creating pipe extract data to %q: %w", dest, err)
 		}
 		defer contentReader.Close()
 		defer contentWriter.Close()
@@ -134,11 +133,11 @@ func CopyFileWithTarAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap
 			hashWorker.Done()
 		}()
 		if err = originalUntar(io.TeeReader(tarArchive, contentWriter), dest, options); err != nil {
-			err = errors.Wrapf(err, "error extracting data to %q while copying", dest)
+			err = fmt.Errorf("extracting data to %q while copying: %w", dest, err)
 		}
 		hashWorker.Wait()
 		if err == nil {
-			err = errors.Wrapf(hashError, "error calculating digest of data for %q while copying", dest)
+			err = fmt.Errorf("calculating digest of data for %q while copying: %w", dest, hashError)
 		}
 		return err
 	}
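
The dominant change across these vendored files is the move from github.com/pkg/errors wrapping (errors.Wrapf/errors.Wrap) to standard-library wrapping with fmt.Errorf and the %w verb, together with lowercase error messages. A minimal, self-contained sketch of the two styles follows; the destination path and file name are invented for illustration and are not part of the vendored code.

// wrapdemo.go: illustrative only; not part of the vendored code.
package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

func main() {
	dest := "/var/tmp/dest" // hypothetical destination, for illustration
	_, err := os.Open("/does/not/exist")

	// Old style (github.com/pkg/errors):
	//   err = errors.Wrapf(err, "error extracting data to %q while copying", dest)
	// New style, as adopted throughout this bump:
	err = fmt.Errorf("extracting data to %q while copying: %w", dest, err)

	// %w keeps the original error in the chain, so callers can still match it.
	fmt.Println(errors.Is(err, fs.ErrNotExist)) // prints: true
	fmt.Println(err)
}

Unlike errors.Wrapf, which returns nil when the wrapped error is nil, fmt.Errorf with %w always returns a non-nil error; that is why call sites that previously relied on Wrapf's nil-passthrough now add an explicit if err != nil guard, as in the invokePack hunk below.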
vendor/github.com/containers/storage/pkg/chrootarchive/archive_darwin.go (generated, vendored, new file): 21 lines added

@@ -0,0 +1,21 @@
+package chrootarchive
+
+import (
+	"io"
+
+	"github.com/containers/storage/pkg/archive"
+)
+
+func chroot(path string) error {
+	return nil
+}
+
+func invokeUnpack(decompressedArchive io.ReadCloser,
+	dest string,
+	options *archive.TarOptions, root string) error {
+	return archive.Unpack(decompressedArchive, dest, options)
+}
+
+func invokePack(srcPath string, options *archive.TarOptions, root string) (io.ReadCloser, error) {
+	return archive.TarWithOptions(srcPath, options)
+}
vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go (generated, vendored): 23 lines changed

@@ -1,9 +1,11 @@
-// +build !windows
+//go:build !windows && !darwin
+// +build !windows,!darwin
 
 package chrootarchive
 
 import (
 	"bytes"
+	"errors"
 	"flag"
 	"fmt"
 	"io"
@@ -15,7 +17,6 @@ import (
 
 	"github.com/containers/storage/pkg/archive"
 	"github.com/containers/storage/pkg/reexec"
-	"github.com/pkg/errors"
 )
 
 // untar is the entry-point for storage-untar on re-exec. This is not used on
@@ -69,7 +70,7 @@ func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.T
 	// child
 	r, w, err := os.Pipe()
 	if err != nil {
-		return fmt.Errorf("Untar pipe failure: %v", err)
+		return fmt.Errorf("untar pipe failure: %w", err)
 	}
 
 	if root != "" {
@@ -96,13 +97,13 @@ func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.T
 
 	if err := cmd.Start(); err != nil {
 		w.Close()
-		return fmt.Errorf("Untar error on re-exec cmd: %v", err)
+		return fmt.Errorf("untar error on re-exec cmd: %w", err)
 	}
 
 	//write the options to the pipe for the untar exec to read
 	if err := json.NewEncoder(w).Encode(options); err != nil {
 		w.Close()
-		return fmt.Errorf("Untar json encode to pipe failed: %v", err)
+		return fmt.Errorf("untar json encode to pipe failed: %w", err)
 	}
 	w.Close()
 
@@ -112,7 +113,7 @@ func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.T
 		// pending on write pipe forever
 		io.Copy(ioutil.Discard, decompressedArchive)
 
-		return fmt.Errorf("Error processing tar file(%v): %s", err, output)
+		return fmt.Errorf("processing tar file(%s): %w", output, err)
 	}
 	return nil
 }
@@ -184,22 +185,24 @@ func invokePack(srcPath string, options *archive.TarOptions, root string) (io.Re
 
 	stdin, err := cmd.StdinPipe()
 	if err != nil {
-		return nil, errors.Wrap(err, "error getting options pipe for tar process")
+		return nil, fmt.Errorf("getting options pipe for tar process: %w", err)
 	}
 
 	if err := cmd.Start(); err != nil {
-		return nil, errors.Wrap(err, "tar error on re-exec cmd")
+		return nil, fmt.Errorf("tar error on re-exec cmd: %w", err)
 	}
 
 	go func() {
 		err := cmd.Wait()
-		err = errors.Wrapf(err, "error processing tar file: %s", errBuff)
+		if err != nil {
+			err = fmt.Errorf("processing tar file(%s): %w", errBuff, err)
+		}
		tarW.CloseWithError(err)
 	}()
 
 	if err := json.NewEncoder(stdin).Encode(options); err != nil {
 		stdin.Close()
-		return nil, errors.Wrap(err, "tar json encode to pipe failed")
+		return nil, fmt.Errorf("tar json encode to pipe failed: %w", err)
 	}
 	stdin.Close()
 
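
For context on the pattern these hunks touch: invokeUnpack and invokePack re-execute the current binary through the containers/storage reexec package and hand the archive.TarOptions to the child as JSON over a pipe. Below is a stripped-down sketch of that pattern, assuming the reexec package's Register/Init/Command API; the handler name, the options struct, and the destination path are invented for illustration, and error wrapping follows the new %w style.

// reexecsketch.go: illustrative only; not the vendored implementation.
package main

import (
	"encoding/json"
	"fmt"
	"os"

	"github.com/containers/storage/pkg/reexec"
)

// unpackOptions stands in for archive.TarOptions; the vendored code encodes
// the full options struct over the pipe in the same way.
type unpackOptions struct {
	Dest string `json:"dest"`
}

func init() {
	// The child re-executes the same binary under this name and reads its
	// options as JSON from stdin, mirroring storage-untar in archive_unix.go.
	reexec.Register("example-untar", func() {
		var opts unpackOptions
		if err := json.NewDecoder(os.Stdin).Decode(&opts); err != nil {
			fmt.Fprintf(os.Stderr, "decoding options: %v\n", err)
			os.Exit(1)
		}
		fmt.Println("child would unpack into", opts.Dest)
	})
}

func main() {
	if reexec.Init() {
		return // we were the re-exec'd child; the registered handler already ran
	}

	cmd := reexec.Command("example-untar")
	stdin, err := cmd.StdinPipe()
	if err != nil {
		panic(fmt.Errorf("getting options pipe for tar process: %w", err))
	}
	cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr

	if err := cmd.Start(); err != nil {
		panic(fmt.Errorf("tar error on re-exec cmd: %w", err))
	}
	if err := json.NewEncoder(stdin).Encode(unpackOptions{Dest: "/tmp/dest"}); err != nil {
		stdin.Close()
		panic(fmt.Errorf("tar json encode to pipe failed: %w", err))
	}
	stdin.Close()
	if err := cmd.Wait(); err != nil {
		panic(fmt.Errorf("processing tar file: %w", err))
	}
}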
vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go (generated, vendored): 18 lines changed

@@ -36,7 +36,7 @@ func chroot(path string) (err error) {
 	}
 
 	if err := unix.Unshare(unix.CLONE_NEWNS); err != nil {
-		return fmt.Errorf("Error creating mount namespace before pivot: %v", err)
+		return fmt.Errorf("creating mount namespace before pivot: %w", err)
 	}
 
 	// make everything in new ns private
@@ -53,7 +53,7 @@ func chroot(path string) (err error) {
 	// setup oldRoot for pivot_root
 	pivotDir, err := ioutil.TempDir(path, ".pivot_root")
 	if err != nil {
-		return fmt.Errorf("Error setting up pivot dir: %v", err)
+		return fmt.Errorf("setting up pivot dir: %w", err)
 	}
 
 	var mounted bool
@@ -72,7 +72,7 @@ func chroot(path string) (err error) {
 		// pivotDir doesn't exist if pivot_root failed and chroot+chdir was successful
 		// because we already cleaned it up on failed pivot_root
 		if errCleanup != nil && !os.IsNotExist(errCleanup) {
-			errCleanup = fmt.Errorf("Error cleaning up after pivot: %v", errCleanup)
+			errCleanup = fmt.Errorf("cleaning up after pivot: %w", errCleanup)
 			if err == nil {
 				err = errCleanup
 			}
@@ -82,7 +82,7 @@ func chroot(path string) (err error) {
 	if err := unix.PivotRoot(path, pivotDir); err != nil {
 		// If pivot fails, fall back to the normal chroot after cleaning up temp dir
 		if err := os.Remove(pivotDir); err != nil {
-			return fmt.Errorf("Error cleaning up after failed pivot: %v", err)
+			return fmt.Errorf("cleaning up after failed pivot: %w", err)
 		}
 		return realChroot(path)
 	}
@@ -93,17 +93,17 @@ func chroot(path string) (err error) {
 	pivotDir = filepath.Join("/", filepath.Base(pivotDir))
 
 	if err := unix.Chdir("/"); err != nil {
-		return fmt.Errorf("Error changing to new root: %v", err)
+		return fmt.Errorf("changing to new root: %w", err)
 	}
 
 	// Make the pivotDir (where the old root lives) private so it can be unmounted without propagating to the host
 	if err := unix.Mount("", pivotDir, "", unix.MS_PRIVATE|unix.MS_REC, ""); err != nil {
-		return fmt.Errorf("Error making old root private after pivot: %v", err)
+		return fmt.Errorf("making old root private after pivot: %w", err)
 	}
 
 	// Now unmount the old root so it's no longer visible from the new root
 	if err := unix.Unmount(pivotDir, unix.MNT_DETACH); err != nil {
-		return fmt.Errorf("Error while unmounting old root after pivot: %v", err)
+		return fmt.Errorf("while unmounting old root after pivot: %w", err)
 	}
 	mounted = false
 
@@ -112,10 +112,10 @@ func chroot(path string) (err error) {
 
 func realChroot(path string) error {
 	if err := unix.Chroot(path); err != nil {
-		return fmt.Errorf("Error after fallback to chroot: %v", err)
+		return fmt.Errorf("after fallback to chroot: %w", err)
 	}
 	if err := unix.Chdir("/"); err != nil {
-		return fmt.Errorf("Error changing to new root after chroot: %v", err)
+		return fmt.Errorf("changing to new root after chroot: %w", err)
 	}
 	return nil
 }
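
The chroot() in this file hardens plain chroot(2) by unsharing a mount namespace and switching roots with pivot_root(2), falling back to realChroot only if the pivot fails. A compressed, self-contained sketch of that sequence follows; it assumes a Linux host, root privileges, and an existing directory to pivot into, and it illustrates the technique rather than reproducing the vendored implementation (which also tracks the bind mount for cleanup and takes the fallback path shown above).

// pivotsketch.go: illustrative only; assumes Linux, root, and that newRoot exists.
package main

import (
	"fmt"
	"os"
	"path/filepath"

	"golang.org/x/sys/unix"
)

func chrootWithPivot(newRoot string) error {
	// Work in a private mount namespace so nothing propagates back to the host.
	if err := unix.Unshare(unix.CLONE_NEWNS); err != nil {
		return fmt.Errorf("creating mount namespace before pivot: %w", err)
	}
	if err := unix.Mount("", "/", "", unix.MS_PRIVATE|unix.MS_REC, ""); err != nil {
		return fmt.Errorf("making / private: %w", err)
	}
	// pivot_root requires the new root to be a mount point; bind it onto itself.
	if err := unix.Mount(newRoot, newRoot, "", unix.MS_BIND|unix.MS_REC, ""); err != nil {
		return fmt.Errorf("bind-mounting new root: %w", err)
	}
	// The old root gets parked in a temporary directory inside the new root.
	pivotDir, err := os.MkdirTemp(newRoot, ".pivot_root")
	if err != nil {
		return fmt.Errorf("setting up pivot dir: %w", err)
	}
	if err := unix.PivotRoot(newRoot, pivotDir); err != nil {
		os.Remove(pivotDir)
		return fmt.Errorf("pivot_root: %w", err) // the vendored code falls back to chroot(2) here
	}
	if err := unix.Chdir("/"); err != nil {
		return fmt.Errorf("changing to new root: %w", err)
	}
	// Detach and delete the parked old root so it is no longer reachable.
	pivotDir = filepath.Join("/", filepath.Base(pivotDir))
	if err := unix.Mount("", pivotDir, "", unix.MS_PRIVATE|unix.MS_REC, ""); err != nil {
		return fmt.Errorf("making old root private after pivot: %w", err)
	}
	if err := unix.Unmount(pivotDir, unix.MNT_DETACH); err != nil {
		return fmt.Errorf("while unmounting old root after pivot: %w", err)
	}
	return os.Remove(pivotDir)
}

func main() {
	if err := chrootWithPivot("/tmp/newroot"); err != nil {
		fmt.Println(err)
	}
}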
vendor/github.com/containers/storage/pkg/chrootarchive/chroot_unix.go (generated, vendored): 2 lines changed

@@ -1,4 +1,4 @@
-// +build !windows,!linux
+// +build !windows,!linux,!darwin
 
 package chrootarchive
 
vendor/github.com/containers/storage/pkg/chrootarchive/diff_darwin.go (generated, vendored, new file): 41 lines added

@@ -0,0 +1,41 @@
+package chrootarchive
+
+import (
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+
+	"github.com/containers/storage/pkg/archive"
+)
+
+// applyLayerHandler parses a diff in the standard layer format from `layer`, and
+// applies it to the directory `dest`. Returns the size in bytes of the
+// contents of the layer.
+func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) {
+	dest = filepath.Clean(dest)
+
+	if decompress {
+		decompressed, err := archive.DecompressStream(layer)
+		if err != nil {
+			return 0, err
+		}
+		defer decompressed.Close()
+
+		layer = decompressed
+	}
+
+	tmpDir, err := ioutil.TempDir(os.Getenv("temp"), "temp-storage-extract")
+	if err != nil {
+		return 0, fmt.Errorf("ApplyLayer failed to create temp-storage-extract under %s. %s", dest, err)
+	}
+
+	s, err := archive.UnpackLayer(dest, layer, options)
+	os.RemoveAll(tmpDir)
+	if err != nil {
+		return 0, fmt.Errorf("ApplyLayer %s failed UnpackLayer to %s: %s", layer, dest, err)
+	}
+
+	return s, nil
+}
vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go (generated, vendored): 11 lines changed

@@ -1,4 +1,5 @@
-//+build !windows
+//go:build !windows && !darwin
+// +build !windows,!darwin
 
 package chrootarchive
 
@@ -68,7 +69,7 @@ func applyLayer() {
 
 	encoder := json.NewEncoder(os.Stdout)
 	if err := encoder.Encode(applyLayerResponse{size}); err != nil {
-		fatal(fmt.Errorf("unable to encode layerSize JSON: %s", err))
+		fatal(fmt.Errorf("unable to encode layerSize JSON: %w", err))
 	}
 
 	if _, err := flush(os.Stdin); err != nil {
@@ -104,7 +105,7 @@ func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions
 
 	data, err := json.Marshal(options)
 	if err != nil {
-		return 0, fmt.Errorf("ApplyLayer json encode: %v", err)
+		return 0, fmt.Errorf("ApplyLayer json encode: %w", err)
 	}
 
 	cmd := reexec.Command("storage-applyLayer", dest)
@@ -115,14 +116,14 @@ func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions
 	cmd.Stdout, cmd.Stderr = outBuf, errBuf
 
 	if err = cmd.Run(); err != nil {
-		return 0, fmt.Errorf("ApplyLayer %s stdout: %s stderr: %s", err, outBuf, errBuf)
+		return 0, fmt.Errorf("ApplyLayer stdout: %s stderr: %s %w", outBuf, errBuf, err)
	}
 
 	// Stdout should be a valid JSON struct representing an applyLayerResponse.
 	response := applyLayerResponse{}
 	decoder := json.NewDecoder(outBuf)
 	if err = decoder.Decode(&response); err != nil {
-		return 0, fmt.Errorf("unable to decode ApplyLayer JSON response: %s", err)
+		return 0, fmt.Errorf("unable to decode ApplyLayer JSON response: %w", err)
 	}
 
 	return response.LayerSize, nil
vendor/github.com/containers/storage/pkg/chrootarchive/init_darwin.go (generated, vendored, new file): 4 lines added

@@ -0,0 +1,4 @@
+package chrootarchive
+
+func init() {
+}
vendor/github.com/containers/storage/pkg/chrootarchive/init_unix.go (generated, vendored): 2 lines changed

@@ -1,4 +1,4 @@
-// +build !windows
+// +build !windows,!darwin
 
 package chrootarchive
 