Mirror of https://github.com/containers/skopeo.git (synced 2025-09-25 12:16:17 +00:00)
Update buildah to 1.8.4, c/storage to 1.12.10
Signed-off-by: Miloslav Trmač <mitr@redhat.com>
vendor/github.com/containers/storage/pkg/chrootarchive/archive.go (generated, vendored): 36 changed lines
@@ -1,7 +1,7 @@
 package chrootarchive

 import (
-    "archive/tar"
+    stdtar "archive/tar"
     "fmt"
     "io"
     "io/ioutil"
@@ -34,18 +34,34 @@ func NewArchiverWithChown(tarIDMappings *idtools.IDMappings, chownOpts *idtools.
 // The archive may be compressed with one of the following algorithms:
 // identity (uncompressed), gzip, bzip2, xz.
 func Untar(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
-    return untarHandler(tarArchive, dest, options, true)
+    return untarHandler(tarArchive, dest, options, true, dest)
+}
+
+// UntarWithRoot is the same as `Untar`, but allows you to pass in a root directory
+// The root directory is the directory that will be chrooted to.
+// `dest` must be a path within `root`, if it is not an error will be returned.
+//
+// `root` should set to a directory which is not controlled by any potentially
+// malicious process.
+//
+// This should be used to prevent a potential attacker from manipulating `dest`
+// such that it would provide access to files outside of `dest` through things
+// like symlinks. Normally `ResolveSymlinksInScope` would handle this, however
+// sanitizing symlinks in this manner is inherrently racey:
+// ref: CVE-2018-15664
+func UntarWithRoot(tarArchive io.Reader, dest string, options *archive.TarOptions, root string) error {
+    return untarHandler(tarArchive, dest, options, true, root)
 }

 // UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive,
 // and unpacks it into the directory at `dest`.
 // The archive must be an uncompressed stream.
 func UntarUncompressed(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
-    return untarHandler(tarArchive, dest, options, false)
+    return untarHandler(tarArchive, dest, options, false, dest)
 }

 // Handler for teasing out the automatic decompression
-func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions, decompress bool) error {
+func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions, decompress bool, root string) error {
     if tarArchive == nil {
         return fmt.Errorf("Empty archive")
     }
@@ -77,7 +93,15 @@ func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions
         r = decompressedArchive
     }

-    return invokeUnpack(r, dest, options)
+    return invokeUnpack(r, dest, options, root)
+}
+
+// Tar tars the requested path while chrooted to the specified root.
+func Tar(srcPath string, options *archive.TarOptions, root string) (io.ReadCloser, error) {
+    if options == nil {
+        options = &archive.TarOptions{}
+    }
+    return invokePack(srcPath, options, root)
 }

 // CopyFileWithTarAndChown returns a function which copies a single file from outside
@@ -99,7 +123,7 @@ func CopyFileWithTarAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap
         var hashWorker sync.WaitGroup
         hashWorker.Add(1)
         go func() {
-            t := tar.NewReader(contentReader)
+            t := stdtar.NewReader(contentReader)
             _, err := t.Next()
             if err != nil {
                 hashError = err
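
Note (not part of the commit): the new UntarWithRoot separates the chroot target from the unpack destination, so a caller can chroot into a trusted root and unpack into a directory beneath it, instead of chrooting into the possibly attacker-influenced dest itself. A minimal sketch of a caller, with illustrative paths; only the UntarWithRoot signature is taken from the diff:

package main

import (
    "os"

    "github.com/containers/storage/pkg/archive"
    "github.com/containers/storage/pkg/chrootarchive"
    "github.com/containers/storage/pkg/reexec"
)

func main() {
    // Importing chrootarchive registers its re-exec handlers (see init_unix.go
    // further down); reexec.Init dispatches to them when this binary is re-executed.
    if reexec.Init() {
        return
    }

    f, err := os.Open("/tmp/layer.tar") // illustrative input
    if err != nil {
        panic(err)
    }
    defer f.Close()

    root := "/var/lib/mystore"         // trusted root: this is what gets chrooted
    dest := root + "/overlay/abc/diff" // must resolve to a path inside root

    if err := chrootarchive.UntarWithRoot(f, dest, &archive.TarOptions{}, root); err != nil {
        panic(err)
    }
}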
vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go (generated, vendored): 130 changed lines
@@ -10,10 +10,13 @@ import (
     "io"
     "io/ioutil"
     "os"
+    "path/filepath"
     "runtime"
+    "strings"

     "github.com/containers/storage/pkg/archive"
     "github.com/containers/storage/pkg/reexec"
+    "github.com/pkg/errors"
 )

 // untar is the entry-point for storage-untar on re-exec. This is not used on
@@ -23,18 +26,28 @@ func untar() {
     runtime.LockOSThread()
     flag.Parse()

-    var options *archive.TarOptions
+    var options archive.TarOptions

     //read the options from the pipe "ExtraFiles"
     if err := json.NewDecoder(os.NewFile(3, "options")).Decode(&options); err != nil {
         fatal(err)
     }

-    if err := chroot(flag.Arg(0)); err != nil {
+    dst := flag.Arg(0)
+    var root string
+    if len(flag.Args()) > 1 {
+        root = flag.Arg(1)
+    }
+
+    if root == "" {
+        root = dst
+    }
+
+    if err := chroot(root); err != nil {
         fatal(err)
     }

-    if err := archive.Unpack(os.Stdin, "/", options); err != nil {
+    if err := archive.Unpack(os.Stdin, dst, &options); err != nil {
         fatal(err)
     }
     // fully consume stdin in case it is zero padded
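
Note on the child-side protocol (my reading of this hunk, not commit text): the re-exec'd storage-untar child now takes two positional arguments, the unpack destination and the chroot root, while the TarOptions still arrive as JSON on file descriptor 3, which is simply the first entry the parent appends to cmd.ExtraFiles. A condensed model of the parent side; the function name is mine and error handling is trimmed:

package untarsketch

import (
    "encoding/json"
    "io"
    "os"

    "github.com/containers/storage/pkg/archive"
    "github.com/containers/storage/pkg/reexec"
)

func spawnUntar(layer io.Reader, dest, root string, options *archive.TarOptions) error {
    r, w, err := os.Pipe()
    if err != nil {
        return err
    }
    defer r.Close()

    cmd := reexec.Command("storage-untar", dest, root) // argv[1]=dest, argv[2]=root in the child
    cmd.Stdin = layer                                  // tar stream on the child's stdin
    cmd.ExtraFiles = append(cmd.ExtraFiles, r)         // read end becomes fd 3 in the child
    if err := cmd.Start(); err != nil {
        return err
    }
    // The child decodes this via json.NewDecoder(os.NewFile(3, "options")).
    if err := json.NewEncoder(w).Encode(options); err != nil {
        w.Close()
        return err
    }
    w.Close()
    return cmd.Wait()
}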
@@ -45,7 +58,10 @@ func untar() {
     os.Exit(0)
 }

-func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.TarOptions) error {
+func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.TarOptions, root string) error {
+    if root == "" {
+        return errors.New("must specify a root to chroot to")
+    }

     // We can't pass a potentially large exclude list directly via cmd line
     // because we easily overrun the kernel's max argument/environment size
@@ -57,7 +73,21 @@ func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.T
         return fmt.Errorf("Untar pipe failure: %v", err)
     }

-    cmd := reexec.Command("storage-untar", dest)
+    if root != "" {
+        relDest, err := filepath.Rel(root, dest)
+        if err != nil {
+            return err
+        }
+        if relDest == "." {
+            relDest = "/"
+        }
+        if relDest[0] != '/' {
+            relDest = "/" + relDest
+        }
+        dest = relDest
+    }
+
+    cmd := reexec.Command("storage-untar", dest, root)
     cmd.Stdin = decompressedArchive

     cmd.ExtraFiles = append(cmd.ExtraFiles, r)
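
The path rewriting above is the core of the fix: the child chroots to root, so dest has to be re-expressed relative to that root before it is handed over on the command line. A self-contained illustration of the normalization, with made-up paths:

package main

import (
    "fmt"
    "path/filepath"
)

func main() {
    root := "/var/lib/storage"                  // illustrative chroot root
    dest := "/var/lib/storage/overlay/abc/diff" // illustrative unpack destination

    relDest, err := filepath.Rel(root, dest) // "overlay/abc/diff"
    if err != nil {
        panic(err)
    }
    if relDest == "." { // a dest equal to root maps to the chroot's own "/"
        relDest = "/"
    }
    if relDest[0] != '/' { // force an absolute path inside the chroot
        relDest = "/" + relDest
    }
    fmt.Println(relDest) // "/overlay/abc/diff": what the chrooted child unpacks into
}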
@@ -68,6 +98,7 @@ func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.T
     if err := cmd.Start(); err != nil {
         return fmt.Errorf("Untar error on re-exec cmd: %v", err)
     }
+
     //write the options to the pipe for the untar exec to read
     if err := json.NewEncoder(w).Encode(options); err != nil {
         return fmt.Errorf("Untar json encode to pipe failed: %v", err)
@@ -84,3 +115,92 @@ func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.T
     }
     return nil
 }
+
+func tar() {
+    runtime.LockOSThread()
+    flag.Parse()
+
+    src := flag.Arg(0)
+    var root string
+    if len(flag.Args()) > 1 {
+        root = flag.Arg(1)
+    }
+
+    if root == "" {
+        root = src
+    }
+
+    if err := realChroot(root); err != nil {
+        fatal(err)
+    }
+
+    var options archive.TarOptions
+    if err := json.NewDecoder(os.Stdin).Decode(&options); err != nil {
+        fatal(err)
+    }
+
+    rdr, err := archive.TarWithOptions(src, &options)
+    if err != nil {
+        fatal(err)
+    }
+    defer rdr.Close()
+
+    if _, err := io.Copy(os.Stdout, rdr); err != nil {
+        fatal(err)
+    }
+
+    os.Exit(0)
+}
+
+func invokePack(srcPath string, options *archive.TarOptions, root string) (io.ReadCloser, error) {
+    if root == "" {
+        return nil, errors.New("root path must not be empty")
+    }
+
+    relSrc, err := filepath.Rel(root, srcPath)
+    if err != nil {
+        return nil, err
+    }
+    if relSrc == "." {
+        relSrc = "/"
+    }
+    if relSrc[0] != '/' {
+        relSrc = "/" + relSrc
+    }
+
+    // make sure we didn't trim a trailing slash with the call to `Rel`
+    if strings.HasSuffix(srcPath, "/") && !strings.HasSuffix(relSrc, "/") {
+        relSrc += "/"
+    }
+
+    cmd := reexec.Command("storage-tar", relSrc, root)
+
+    errBuff := bytes.NewBuffer(nil)
+    cmd.Stderr = errBuff
+
+    tarR, tarW := io.Pipe()
+    cmd.Stdout = tarW
+
+    stdin, err := cmd.StdinPipe()
+    if err != nil {
+        return nil, errors.Wrap(err, "error getting options pipe for tar process")
+    }
+
+    if err := cmd.Start(); err != nil {
+        return nil, errors.Wrap(err, "tar error on re-exec cmd")
+    }
+
+    go func() {
+        err := cmd.Wait()
+        err = errors.Wrapf(err, "error processing tar file: %s", errBuff)
+        tarW.CloseWithError(err)
+    }()
+
+    if err := json.NewEncoder(stdin).Encode(options); err != nil {
+        stdin.Close()
+        return nil, errors.Wrap(err, "tar json encode to pipe failed")
+    }
+    stdin.Close()
+
+    return tarR, nil
+}
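
Note on the new pack path (my summary, not commit text): invokePack re-executes the binary as storage-tar, sends the TarOptions as JSON on the child's stdin, and streams the resulting tarball back through an io.Pipe attached to the child's stdout, folding any captured stderr into the pipe error when the child exits. Callers go through the exported Tar; a hedged sketch with illustrative paths:

package main

import (
    "io"
    "os"

    "github.com/containers/storage/pkg/chrootarchive"
    "github.com/containers/storage/pkg/reexec"
)

func main() {
    if reexec.Init() { // dispatch to "storage-tar" when running as the re-exec'd child
        return
    }

    root := "/var/lib/mystore"        // trusted root: the child chroots here
    src := root + "/overlay/abc/diff" // path to pack, must live under root

    rc, err := chrootarchive.Tar(src, nil, root) // nil options default to &archive.TarOptions{}
    if err != nil {
        panic(err)
    }
    defer rc.Close()

    out, err := os.Create("/tmp/layer.tar") // illustrative output
    if err != nil {
        panic(err)
    }
    defer out.Close()

    if _, err := io.Copy(out, rc); err != nil {
        panic(err) // surfaces the child's stderr if the re-exec'd tar failed
    }
}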
vendor/github.com/containers/storage/pkg/chrootarchive/archive_windows.go (generated, vendored): 9 changed lines
@@ -14,9 +14,16 @@ func chroot(path string) error {

 func invokeUnpack(decompressedArchive io.ReadCloser,
     dest string,
-    options *archive.TarOptions) error {
+    options *archive.TarOptions, root string) error {
     // Windows is different to Linux here because Windows does not support
     // chroot. Hence there is no point sandboxing a chrooted process to
     // do the unpack. We call inline instead within the daemon process.
     return archive.Unpack(decompressedArchive, longpath.AddPrefix(dest), options)
 }
+
+func invokePack(srcPath string, options *archive.TarOptions, root string) (io.ReadCloser, error) {
+    // Windows is different to Linux here because Windows does not support
+    // chroot. Hence there is no point sandboxing a chrooted process to
+    // do the pack. We call inline instead within the daemon process.
+    return archive.TarWithOptions(srcPath, options)
+}
vendor/github.com/containers/storage/pkg/chrootarchive/chroot_unix.go (generated, vendored): 6 changed lines
@@ -4,9 +4,13 @@ package chrootarchive

 import "golang.org/x/sys/unix"

-func chroot(path string) error {
+func realChroot(path string) error {
     if err := unix.Chroot(path); err != nil {
         return err
     }
     return unix.Chdir("/")
 }
+
+func chroot(path string) error {
+    return realChroot(path)
+}
vendor/github.com/containers/storage/pkg/chrootarchive/init_unix.go (generated, vendored): 1 changed line
@@ -14,6 +14,7 @@ import (
 func init() {
     reexec.Register("storage-applyLayer", applyLayer)
     reexec.Register("storage-untar", untar)
+    reexec.Register("storage-tar", tar)
 }

 func fatal(err error) {
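
Note (general background, not from the commit): this registration is what ties the re-exec scheme together. reexec.Register maps a name to a handler at package init time, reexec.Command re-runs the current binary with that name as argv[0], and reexec.Init() at the top of main runs the matching handler in the child and returns true so the rest of main is skipped there. A minimal, self-contained illustration with a made-up handler name:

package main

import (
    "fmt"
    "os"

    "github.com/containers/storage/pkg/reexec"
)

func init() {
    // Hypothetical handler name, analogous to "storage-tar" registered above.
    reexec.Register("example-child", func() {
        fmt.Println("running in the re-exec'd child")
        os.Exit(0)
    })
}

func main() {
    // In the child, argv[0] matches a registered name, so Init runs the handler
    // and returns true; main stops here.
    if reexec.Init() {
        return
    }

    cmd := reexec.Command("example-child")
    cmd.Stdout = os.Stdout
    cmd.Stderr = os.Stderr
    if err := cmd.Run(); err != nil {
        fmt.Fprintln(os.Stderr, "child failed:", err)
    }
}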