mudler/luet (https://github.com/mudler/luet.git), commit: use containerd to uncompress
@@ -57,14 +57,6 @@ func NewConfigProtect(annotationDir string) *ConfigProtect {
     }
 }
 
-func (c *ConfigProtect) AddAnnotationDir(d string) {
-    c.AnnotationDir = d
-}
-
-func (c *ConfigProtect) GetAnnotationDir() string {
-    return c.AnnotationDir
-}
-
 func (c *ConfigProtect) Map(files []string, protected []ConfigProtectConfFile) {
 
     for _, file := range files {

@@ -72,6 +72,7 @@ var _ = Describe("Delta", func() {
             _, tmpdir, err := Extract(
                 ctx,
                 img2,
+                true,
                 ExtractDeltaFiles(ctx, diff, []string{}, []string{}),
             )
             Expect(err).ToNot(HaveOccurred())

@@ -87,6 +88,7 @@ var _ = Describe("Delta", func() {
             _, tmpdir, err := Extract(
                 ctx,
                 img2,
+                true,
                 ExtractDeltaFiles(ctx, diff, []string{}, []string{"usr/local/go"}),
             )
             Expect(err).ToNot(HaveOccurred())

@@ -98,6 +100,7 @@ var _ = Describe("Delta", func() {
             _, tmpdir, err := Extract(
                 ctx,
                 img2,
+                true,
                 ExtractDeltaFiles(ctx, diff, []string{"usr/local/go"}, []string{"usr/local/go/bin"}),
             )
             Expect(err).ToNot(HaveOccurred())

@@ -110,6 +113,7 @@ var _ = Describe("Delta", func() {
             _, tmpdir, err := Extract(
                 ctx,
                 img2,
+                true,
                 ExtractDeltaFiles(ctx, diff, []string{"usr/local/go"}, []string{}),
             )
             Expect(err).ToNot(HaveOccurred())

@@ -166,7 +166,7 @@ func ExtractFiles(
     }
 }
 
-func ExtractReader(ctx *types.Context, reader io.ReadCloser, output string, filter func(h *tar.Header) (bool, error), opts ...containerdarchive.ApplyOpt) (int64, string, error) {
+func ExtractReader(ctx *types.Context, reader io.ReadCloser, output string, keepPerms bool, filter func(h *tar.Header) (bool, error), opts ...containerdarchive.ApplyOpt) (int64, string, error) {
     defer reader.Close()
 
     perms := map[string][]int{}

@@ -190,39 +190,41 @@ func ExtractReader(ctx *types.Context, reader io.ReadCloser, output string, filt
         return 0, "", err
     }
 
-    for f, p := range perms {
-        ff := filepath.Join(output, f)
-        if _, err := os.Lstat(ff); err == nil {
-            if err := os.Lchown(ff, p[1], p[0]); err != nil {
-                ctx.Warning(err, "failed chowning file")
+    // TODO: Parametrize this
+    if keepPerms {
+        for f, p := range perms {
+            ff := filepath.Join(output, f)
+            if _, err := os.Lstat(ff); err == nil {
+                if err := os.Lchown(ff, p[1], p[0]); err != nil {
+                    ctx.Warning(err, "failed chowning file")
+                }
             }
         }
     }
 
     for _, m := range []map[string]map[string]string{xattrs, paxrecords} {
         for key, attrs := range m {
             ff := filepath.Join(output, key)
             for k, attr := range attrs {
                 if err := system.Lsetxattr(ff, k, []byte(attr), 0); err != nil {
                     if errors.Is(err, syscall.ENOTSUP) {
                         ctx.Debug("ignored xattr %s in archive", key)
                     }
                 }
             }
         }
     }
 
     return c, output, nil
 }
 
-func Extract(ctx *types.Context, img v1.Image, filter func(h *tar.Header) (bool, error), opts ...containerdarchive.ApplyOpt) (int64, string, error) {
+func Extract(ctx *types.Context, img v1.Image, keepPerms bool, filter func(h *tar.Header) (bool, error), opts ...containerdarchive.ApplyOpt) (int64, string, error) {
     tmpdiffs, err := ctx.Config.GetSystem().TempDir("extraction")
     if err != nil {
         return 0, "", errors.Wrap(err, "Error met while creating tempdir for rootfs")
     }
-    return ExtractReader(ctx, mutate.Extract(img), tmpdiffs, filter, opts...)
+    return ExtractReader(ctx, mutate.Extract(img), tmpdiffs, keepPerms, filter, opts...)
 }
 
-func ExtractTo(ctx *types.Context, img v1.Image, output string, filter func(h *tar.Header) (bool, error), opts ...containerdarchive.ApplyOpt) (int64, string, error) {
-    return ExtractReader(ctx, mutate.Extract(img), output, filter, opts...)
+func ExtractTo(ctx *types.Context, img v1.Image, output string, keepPerms bool, filter func(h *tar.Header) (bool, error), opts ...containerdarchive.ApplyOpt) (int64, string, error) {
+    return ExtractReader(ctx, mutate.Extract(img), output, keepPerms, filter, opts...)
 }

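The signature changes above are the API side of this commit: ExtractReader, Extract, and ExtractTo now take a keepPerms flag ahead of the filter, and the ownership recorded while walking the tar is only re-applied when that flag is set. A minimal caller sketch, assuming an already-built *types.Context and v1.Image (variable names are illustrative, not from the commit):

    // Unpack an image into a temporary rootfs, re-applying recorded UID/GID.
    _, rootfs, err := image.Extract(
        ctx,
        img,
        true, // keepPerms: chown extracted files back to the owners stored in the tar
        image.ExtractFiles(ctx, "/usr", []string{}, []string{}),
    )
    if err != nil {
        return err
    }
    defer os.RemoveAll(rootfs) // Extract unpacks into a fresh temporary directory
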
@@ -58,6 +58,7 @@ var _ = Describe("Extract", func() {
             _, tmpdir, err := Extract(
                 ctx,
                 img,
+                true,
                 ExtractFiles(ctx, "", []string{}, []string{}),
             )
             Expect(err).ToNot(HaveOccurred())

@@ -71,6 +72,7 @@ var _ = Describe("Extract", func() {
             _, tmpdir, err := Extract(
                 ctx,
                 img,
+                true,
                 ExtractFiles(ctx, "/usr", []string{}, []string{}),
             )
             Expect(err).ToNot(HaveOccurred())

@@ -84,6 +86,7 @@ var _ = Describe("Extract", func() {
             _, tmpdir, err := Extract(
                 ctx,
                 img,
+                true,
                 ExtractFiles(ctx, "/usr", []string{"bin"}, []string{"sbin"}),
             )
             Expect(err).ToNot(HaveOccurred())

@@ -98,6 +101,7 @@ var _ = Describe("Extract", func() {
             _, tmpdir, err := Extract(
                 ctx,
                 img,
+                true,
                 ExtractFiles(ctx, "", []string{"/usr|/usr/bin"}, []string{"^/bin"}),
             )
             Expect(err).ToNot(HaveOccurred())

@@ -28,6 +28,7 @@ import (
     "path"
     "path/filepath"
 
+    "github.com/docker/docker/pkg/pools"
     v1 "github.com/google/go-containerregistry/pkg/v1"
     zstd "github.com/klauspost/compress/zstd"
     gzip "github.com/klauspost/pgzip"

@@ -35,6 +36,7 @@ import (
     //"strconv"
     "strings"
 
+    containerdCompression "github.com/containerd/containerd/archive/compression"
     config "github.com/mudler/luet/pkg/api/core/config"
     "github.com/mudler/luet/pkg/api/core/image"
     types "github.com/mudler/luet/pkg/api/core/types"

@@ -67,8 +69,8 @@ type PackageArtifact struct {
     Runtime *pkg.DefaultPackage `json:"runtime,omitempty"`
 }
 
-func ImageToArtifact(ctx *types.Context, img v1.Image, t compression.Implementation, output string, filter func(h *tar.Header) (bool, error)) (*PackageArtifact, error) {
-    _, tmpdiffs, err := image.Extract(ctx, img, filter)
+func ImageToArtifact(ctx *types.Context, img v1.Image, t compression.Implementation, output string, keepPerms bool, filter func(h *tar.Header) (bool, error)) (*PackageArtifact, error) {
+    _, tmpdiffs, err := image.Extract(ctx, img, keepPerms, filter)
     if err != nil {
         return nil, errors.Wrap(err, "Error met while creating tempdir for rootfs")
     }

@@ -385,6 +387,85 @@ func hashFileContent(path string) (string, error) {
     return base64.URLEncoding.EncodeToString(h.Sum(nil)), nil
 }
 
+func replaceFileTarWrapper(dst string, inputTarStream io.ReadCloser, mods []string, fn func(dst, path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error)) io.ReadCloser {
+    pipeReader, pipeWriter := io.Pipe()
+
+    go func() {
+        tarReader := tar.NewReader(inputTarStream)
+        tarWriter := tar.NewWriter(pipeWriter)
+        defer inputTarStream.Close()
+        defer tarWriter.Close()
+
+        modify := func(name string, original *tar.Header, tarReader io.Reader) error {
+            header, data, err := fn(dst, name, original, tarReader)
+            switch {
+            case err != nil:
+                return err
+            case header == nil:
+                return nil
+            }
+
+            if header.Name == "" {
+                header.Name = name
+            }
+            header.Size = int64(len(data))
+            if err := tarWriter.WriteHeader(header); err != nil {
+                return err
+            }
+            if len(data) != 0 {
+                if _, err := tarWriter.Write(data); err != nil {
+                    return err
+                }
+            }
+            return nil
+        }
+        var remaining []string
+        var err error
+        var originalHeader *tar.Header
+        for {
+            originalHeader, err = tarReader.Next()
+            if err == io.EOF {
+                break
+            }
+            if err != nil {
+                pipeWriter.CloseWithError(err)
+                return
+            }
+
+            if helpers.Contains(mods, originalHeader.Name) {
+                // No modifiers for this file, copy the header and data
+                if err := tarWriter.WriteHeader(originalHeader); err != nil {
+                    pipeWriter.CloseWithError(err)
+                    return
+                }
+                if _, err := pools.Copy(tarWriter, tarReader); err != nil {
+                    pipeWriter.CloseWithError(err)
+                    return
+                }
+                remaining = append(remaining, originalHeader.Name)
+                continue
+            }
+
+            if err := modify(originalHeader.Name, originalHeader, tarReader); err != nil {
+                pipeWriter.CloseWithError(err)
+                return
+            }
+        }
+
+        // Apply the modifiers that haven't matched any files in the archive
+        for _, name := range remaining {
+            if err := modify(name, nil, nil); err != nil {
+                pipeWriter.CloseWithError(err)
+                return
+            }
+        }
+
+        pipeWriter.Close()
+
+    }()
+    return pipeReader
+}
+
 func tarModifierWrapperFunc(ctx *types.Context) func(dst, path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) {
     return func(dst, path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) {
         // If the destination path already exists I rename target file name with postfix.

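The new replaceFileTarWrapper appears to be modeled on Docker's archive.ReplaceFileTarWrapper: it re-streams the incoming tar through an io.Pipe in a goroutine, letting fn rewrite the header and content of entries named in mods before they reach the extractor. A small illustrative fragment of how it can be driven (the modifier below is a stand-in, not the tarModifierWrapperFunc used by this commit, and tarStream / protectedFiles are assumed to exist in the caller):

    // Sketch: rename matching entries on the fly while streaming a tar.
    mod := func(dst, path string, h *tar.Header, r io.Reader) (*tar.Header, []byte, error) {
        if h == nil {
            return nil, nil, nil // entry never appeared in the archive; emit nothing
        }
        data, err := io.ReadAll(r) // keep the original content
        if err != nil {
            return nil, nil, err
        }
        h.Name = h.Name + ".new" // illustrative rename for protected files
        return h, data, nil
    }
    rewritten := replaceFileTarWrapper(dst, tarStream, protectedFiles, mod)
    defer rewritten.Close()
    // rewritten now yields the modified tar stream and can be fed to image.ExtractReader.
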
@@ -448,8 +529,7 @@ func tarModifierWrapperFunc(ctx *types.Context) func(dst, path string, header *t
     }
 }
 
-func (a *PackageArtifact) GetProtectFiles(ctx *types.Context) []string {
-    ans := []string{}
+func (a *PackageArtifact) GetProtectFiles(ctx *types.Context) (res []string) {
     annotationDir := ""
 
     if !ctx.Config.ConfigProtectSkip {

@@ -468,159 +548,75 @@ func (a *PackageArtifact) GetProtectFiles(ctx *types.Context) []string {
         cp.Map(a.Files, ctx.Config.GetConfigProtectConfFiles())
 
         // NOTE: for unpack we need files path without initial /
-        ans = cp.GetProtectFiles(false)
+        res = cp.GetProtectFiles(false)
     }
 
-    return ans
+    return
 }
 
 // Unpack Untar and decompress (TODO) to the given path
 func (a *PackageArtifact) Unpack(ctx *types.Context, dst string, keepPerms bool) error {
-    if !strings.HasPrefix(dst, "/") {
+    if !strings.HasPrefix(dst, string(os.PathSeparator)) {
         return errors.New("destination must be an absolute path")
     }
 
     // Create
     protectedFiles := a.GetProtectFiles(ctx)
 
-    tarModifier := helpers.NewTarModifierWrapper(dst, tarModifierWrapperFunc(ctx))
+    mod := tarModifierWrapperFunc(ctx)
+    //tarModifier := helpers.NewTarModifierWrapper(dst, mod)
 
-    switch a.CompressionType {
-    case compression.Zstandard:
-        // Create the uncompressed archive
-        archive, err := os.Create(a.Path + ".uncompressed")
-        if err != nil {
-            return err
-        }
-        defer os.RemoveAll(a.Path + ".uncompressed")
-        defer archive.Close()
-
-        original, err := os.Open(a.Path)
-        if err != nil {
-            return errors.Wrap(err, "Cannot open "+a.Path)
-        }
-        defer original.Close()
-
-        bufferedReader := bufio.NewReader(original)
-
-        d, err := zstd.NewReader(bufferedReader)
-        if err != nil {
-            return err
-        }
-        defer d.Close()
-
-        _, err = io.Copy(archive, d)
-        if err != nil {
-            return errors.Wrap(err, "Cannot copy to "+a.Path+".uncompressed")
-        }
-
-        err = helpers.UntarProtect(a.Path+".uncompressed", dst,
-            ctx.Config.GetGeneral().SameOwner, protectedFiles, tarModifier)
-        if err != nil {
-            return err
-        }
-        return nil
-    case compression.GZip:
-        // Create the uncompressed archive
-        archive, err := os.Create(a.Path + ".uncompressed")
-        if err != nil {
-            return err
-        }
-        defer os.RemoveAll(a.Path + ".uncompressed")
-        defer archive.Close()
-
-        original, err := os.Open(a.Path)
-        if err != nil {
-            return errors.Wrap(err, "Cannot open "+a.Path)
-        }
-        defer original.Close()
-
-        bufferedReader := bufio.NewReader(original)
-        r, err := gzip.NewReader(bufferedReader)
-        if err != nil {
-            return err
-        }
-        defer r.Close()
-
-        _, err = io.Copy(archive, r)
-        if err != nil {
-            return errors.Wrap(err, "Cannot copy to "+a.Path+".uncompressed")
-        }
-
-        err = helpers.UntarProtect(a.Path+".uncompressed", dst,
-            ctx.Config.GetGeneral().SameOwner, protectedFiles, tarModifier)
-        if err != nil {
-            return err
-        }
-        return nil
-    // Defaults to tar only (covers when "none" is supplied)
-    default:
-        return helpers.UntarProtect(a.Path, dst, ctx.Config.GetGeneral().SameOwner,
-            protectedFiles, tarModifier)
-    }
-    return errors.New("Compression type must be supplied")
+    archiveFile, err := os.Open(a.Path)
+    if err != nil {
+        return errors.Wrap(err, "Cannot open "+a.Path)
+    }
+    defer archiveFile.Close()
+
+    decompressed, err := containerdCompression.DecompressStream(archiveFile)
+    if err != nil {
+        return errors.Wrap(err, "Cannot open "+a.Path)
+    }
+    defer decompressed.Close()
+
+    replacerArchive := replaceFileTarWrapper(dst, decompressed, protectedFiles, mod)
+    defer replacerArchive.Close()
+
+    // or with filter?
+    // func(header *tar.Header) (bool, error) {
+    //     if helpers.Contains(protectedFiles, header.Name) {
+    //         newHead, _, err := mod(dst, header.Name, header, decompressed)
+    //         if err != nil {
+    //             return false, err
+    //         }
+    //         header.Name = newHead.Name
+    //         // Override target path
+    //         //target = filepath.Join(dest, header.Name)
+    //     }
+    //     // tarModifier.Modifier()
+    //     return true, nil
+    // },
+    _, _, err = image.ExtractReader(ctx, replacerArchive, dst, ctx.Config.GetGeneral().SameOwner, nil)
+    return err
 }
 
 // FileList generates the list of file of a package from the local archive
 func (a *PackageArtifact) FileList() ([]string, error) {
-    var tr *tar.Reader
-    switch a.CompressionType {
-    case compression.Zstandard:
-        archive, err := os.Create(a.Path + ".uncompressed")
-        if err != nil {
-            return []string{}, err
-        }
-        defer os.RemoveAll(a.Path + ".uncompressed")
-        defer archive.Close()
-
-        original, err := os.Open(a.Path)
-        if err != nil {
-            return []string{}, errors.Wrap(err, "Cannot open "+a.Path)
-        }
-        defer original.Close()
-
-        bufferedReader := bufio.NewReader(original)
-        r, err := zstd.NewReader(bufferedReader)
-        if err != nil {
-            return []string{}, err
-        }
-        defer r.Close()
-        tr = tar.NewReader(r)
-    case compression.GZip:
-        // Create the uncompressed archive
-        archive, err := os.Create(a.Path + ".uncompressed")
-        if err != nil {
-            return []string{}, err
-        }
-        defer os.RemoveAll(a.Path + ".uncompressed")
-        defer archive.Close()
-
-        original, err := os.Open(a.Path)
-        if err != nil {
-            return []string{}, errors.Wrap(err, "Cannot open "+a.Path)
-        }
-        defer original.Close()
-
-        bufferedReader := bufio.NewReader(original)
-        r, err := gzip.NewReader(bufferedReader)
-        if err != nil {
-            return []string{}, err
-        }
-        defer r.Close()
-        tr = tar.NewReader(r)
-
-    // Defaults to tar only (covers when "none" is supplied)
-    default:
-        tarFile, err := os.Open(a.Path)
-        if err != nil {
-            return []string{}, errors.Wrap(err, "Could not open package archive")
-        }
-        defer tarFile.Close()
-        tr = tar.NewReader(tarFile)
-
-    }
-
     var files []string
 
+    archiveFile, err := os.Open(a.Path)
+    if err != nil {
+        return files, errors.Wrap(err, "Cannot open "+a.Path)
+    }
+    defer archiveFile.Close()
+
+    decompressed, err := containerdCompression.DecompressStream(archiveFile)
+    if err != nil {
+        return files, errors.Wrap(err, "Cannot open "+a.Path)
+    }
+    defer decompressed.Close()
+    tr := tar.NewReader(decompressed)
+
     // untar each segment
     for {
         hdr, err := tr.Next()

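This hunk is the heart of "use containerd to uncompress": instead of switching on a.CompressionType and writing an intermediate .uncompressed file, Unpack and FileList now hand the archive to containerd's compression package, which detects gzip, zstd, or plain tar from the stream itself. A self-contained sketch of that pattern (the file name and error handling are illustrative, not taken from the commit):

    package main

    import (
        "archive/tar"
        "fmt"
        "log"
        "os"

        containerdCompression "github.com/containerd/containerd/archive/compression"
    )

    func main() {
        f, err := os.Open("package.tar.zst") // works the same for .tar or .tar.gz
        if err != nil {
            log.Fatal(err)
        }
        defer f.Close()

        // DecompressStream sniffs the magic bytes and returns a reader of plain tar data.
        decompressed, err := containerdCompression.DecompressStream(f)
        if err != nil {
            log.Fatal(err)
        }
        defer decompressed.Close()

        // Walk the entries, as the new FileList does.
        tr := tar.NewReader(decompressed)
        for {
            hdr, err := tr.Next()
            if err != nil {
                break // io.EOF terminates the archive
            }
            fmt.Println(hdr.Name)
        }
    }
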
@@ -162,6 +162,7 @@ RUN echo bar > /test2`))
             ctx,
             img,
             result,
+            false,
             nil,
         )
         Expect(err).ToNot(HaveOccurred())

@@ -211,6 +212,7 @@ RUN echo bar > /test2`))
             ctx,
             img,
             result,
+            false,
             nil,
         )
         Expect(err).ToNot(HaveOccurred())

@@ -248,6 +248,7 @@ func (cs *LuetCompiler) unpackFs(concurrency int, keepPermissions bool, p *compi
     _, rootfs, err := image.Extract(
         cs.Options.Context,
         img,
+        keepPermissions,
         image.ExtractFiles(
             cs.Options.Context,
             p.GetPackageDir(),

@@ -316,6 +317,7 @@ func (cs *LuetCompiler) unpackDelta(concurrency int, keepPermissions bool, p *co
         ref2,
         cs.Options.CompressionType,
         p.Rel(fmt.Sprintf("%s%s", p.GetPackage().GetFingerPrint(), ".package.tar")),
+        keepPermissions,
         image.ExtractDeltaFiles(cs.Options.Context, diff, p.GetIncludes(), p.GetExcludes()),
     )
     if err != nil {

@@ -213,12 +213,8 @@ func DownloadAndExtractDockerImage(ctx *luettypes.Context, image, dest string, a
         ctx,
         img,
         dest,
-        luetimages.ExtractFiles(
-            ctx,
-            "",
-            []string{},
-            []string{},
-        ),
+        true,
+        nil,
     )
     if err != nil {
         return nil, err

@@ -348,6 +348,7 @@ func (l *LuetInstaller) installerOpWorker(i int, wg *sync.WaitGroup, systemLock
     defer wg.Done()
 
     for p := range c {
+
         if p.Uninstall.Package != nil {
             l.Options.Context.Debug("Replacing package inplace")
             toUninstall, uninstall, err := l.generateUninstallFn(p.Uninstall.Option, s, p.Uninstall.Package)

@@ -356,8 +357,10 @@ func (l *LuetInstaller) installerOpWorker(i int, wg *sync.WaitGroup, systemLock
                 continue
                 //return errors.Wrap(err, "while computing uninstall")
             }
+            systemLock.Lock()
             err = uninstall()
+            systemLock.Unlock()
 
             if err != nil {
                 l.Options.Context.Error("Failed uninstall for ", packsToList(toUninstall))
                 continue

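installerOpWorker goroutines drain the same channel concurrently, so the added systemLock.Lock() / Unlock() pair serializes just the uninstall() call while the rest of the worker loop stays parallel. A reduced, runnable sketch of that pattern, assuming a shared sync.Mutex (everything except the lock-around-the-mutation idea is illustrative):

    package main

    import (
        "fmt"
        "sync"
    )

    func main() {
        var systemLock sync.Mutex
        var wg sync.WaitGroup
        ops := make(chan int)

        for i := 0; i < 4; i++ {
            wg.Add(1)
            go func() {
                defer wg.Done()
                for op := range ops {
                    systemLock.Lock() // serialize the shared mutation, as the diff does around uninstall()
                    fmt.Println("processing", op)
                    systemLock.Unlock()
                }
            }()
        }

        for i := 0; i < 10; i++ {
            ops <- i
        }
        close(ops)
        wg.Wait()
    }
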
@@ -929,7 +929,11 @@ func (r *LuetSystemRepository) Sync(ctx *types.Context, force bool) (*LuetSystem
     }
     ctx.Debug("Decompress tree of the repository " + r.Name + "...")
 
-    err = treeFileArtifact.Unpack(ctx, treefs, true)
+    if _, err := os.Lstat(treefs); os.IsNotExist(err) {
+        os.MkdirAll(treefs, 0600)
+    }
+
+    err = treeFileArtifact.Unpack(ctx, treefs, false)
     if err != nil {
         return nil, errors.Wrap(err, "Error met while unpacking tree")
     }

@@ -937,7 +941,7 @@ func (r *LuetSystemRepository) Sync(ctx *types.Context, force bool) (*LuetSystem
     // FIXME: It seems that tar with only one file doesn't create destination
     // directory. I create directory directly for now.
     os.MkdirAll(metafs, os.ModePerm)
-    err = metaFileArtifact.Unpack(ctx, metafs, true)
+    err = metaFileArtifact.Unpack(ctx, metafs, false)
     if err != nil {
         return nil, errors.Wrap(err, "Error met while unpacking metadata")
     }

@@ -204,6 +204,7 @@ func (d *dockerRepositoryGenerator) Generate(r *LuetSystemRepository, imagePrefi
     d.context,
     img,
     repoTemp,
+    d.context.Config.GetGeneral().SameOwner,
     nil,
 )
 if err != nil {