mirror of https://github.com/mudler/luet.git
Add package compression type
TODO: Handle the path substitution in a separate field

Adds GZip support and allows the compiler to switch compression type.
It also adds Concurrency as a compiler attribute (not consumed yet).

Refers to #33
parent bb98259a48
commit d2d72c3fc4
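For orientation, a minimal sketch of how a caller might drive the API this commit introduces (hypothetical snippet; the artifact path, rootfs directory, and concurrency value are placeholders, not from this commit):

	// Hypothetical caller inside the compiler package.
	artifact := NewPackageArtifact("/tmp/foo.package.tar") // placeholder path
	artifact.SetCompressionType(GZip)                      // default is None, i.e. a plain tar

	// Tars the rootfs, gzips it with pgzip, and points the artifact at the .gz file.
	if err := artifact.Compress("/tmp/rootfs", runtime.NumCPU()); err != nil {
		// handle error
	}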
@@ -15,6 +15,7 @@ require (
 	github.com/go-yaml/yaml v2.1.0+incompatible // indirect
 	github.com/hashicorp/go-version v1.2.0
 	github.com/jinzhu/copier v0.0.0-20180308034124-7e38e58719c3
+	github.com/klauspost/pgzip v1.2.1
 	github.com/kyokomi/emoji v2.1.0+incompatible
 	github.com/logrusorgru/aurora v0.0.0-20190417123914-21d75270181e
 	github.com/marcsauter/single v0.0.0-20181104081128-f8bf46f26ec0
@@ -17,6 +17,7 @@ package compiler
 
 import (
 	"archive/tar"
+	"bufio"
 	"io"
 	"io/ioutil"
 	"os"
@@ -24,16 +25,24 @@ import (
 	"path/filepath"
 	"regexp"
 
+	gzip "github.com/klauspost/pgzip"
+
 	//"strconv"
 	"strings"
 	"sync"
 
+	"github.com/mudler/luet/pkg/helpers"
 	. "github.com/mudler/luet/pkg/logger"
 	"github.com/mudler/luet/pkg/solver"
-	yaml "gopkg.in/yaml.v2"
-
-	"github.com/mudler/luet/pkg/helpers"
 	"github.com/pkg/errors"
+	yaml "gopkg.in/yaml.v2"
 )
 
+type CompressionImplementation string
+
+const (
+	None CompressionImplementation = "none" // e.g. tar for standard packages
+	GZip CompressionImplementation = "gzip"
+)
+
 type ArtifactIndex []Artifact
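klauspost/pgzip is a drop-in, parallelized replacement for compress/gzip; the import alias above lets the rest of the file keep calling it gzip. A standalone sketch of the writer API the diff leans on (file names are placeholders; the argument values follow pgzip's README, where SetConcurrency takes a block size in bytes first, then the number of blocks compressed in parallel):

	package main

	import (
		"io"
		"log"
		"os"

		gzip "github.com/klauspost/pgzip"
	)

	func main() {
		in, err := os.Open("archive.tar") // placeholder input
		if err != nil {
			log.Fatal(err)
		}
		defer in.Close()

		out, err := os.Create("archive.tar.gz")
		if err != nil {
			log.Fatal(err)
		}
		defer out.Close()

		w := gzip.NewWriter(out)
		// First argument is the block size in bytes, second the number of
		// blocks compressed in parallel (pgzip's README uses 100000, 10).
		if err := w.SetConcurrency(100000, 10); err != nil {
			log.Fatal(err)
		}
		defer w.Close()

		if _, err := io.Copy(w, in); err != nil {
			log.Fatal(err)
		}
	}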
@@ -57,10 +66,11 @@ type PackageArtifact struct {
 	CompileSpec     *LuetCompilationSpec      `json:"compilationspec"`
 	Checksums       Checksums                 `json:"checksums"`
 	SourceAssertion solver.PackagesAssertions `json:"-"`
+	CompressionType CompressionImplementation `json:"compression"`
 }
 
 func NewPackageArtifact(path string) Artifact {
-	return &PackageArtifact{Path: path, Dependencies: []*PackageArtifact{}, Checksums: Checksums{}}
+	return &PackageArtifact{Path: path, Dependencies: []*PackageArtifact{}, Checksums: Checksums{}, CompressionType: None}
 }
 
 func NewPackageArtifactFromYaml(data []byte) (Artifact, error) {
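A side note on the new `json:"compression"` tag: artifact metadata in this file appears to be read and written with gopkg.in/yaml.v2 (see NewPackageArtifactFromYaml above), and yaml.v2 ignores json struct tags, keying fields by their lowercased Go names instead. If nothing converts through JSON elsewhere, the field would round-trip as compressiontype rather than compression. A minimal standalone illustration of that yaml.v2 behavior:

	package main

	import (
		"fmt"

		yaml "gopkg.in/yaml.v2"
	)

	type demo struct {
		CompressionType string `json:"compression"`
	}

	func main() {
		out, err := yaml.Marshal(demo{CompressionType: "gzip"})
		if err != nil {
			panic(err)
		}
		fmt.Print(string(out)) // prints "compressiontype: gzip" — the json tag is ignored
	}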
@@ -73,6 +83,10 @@ func NewPackageArtifactFromYaml(data []byte) (Artifact, error) {
 	return p, err
 }
 
+func (a *PackageArtifact) SetCompressionType(t CompressionImplementation) {
+	a.CompressionType = t
+}
+
 func (a *PackageArtifact) Hash() error {
 	return a.Checksums.Generate(a)
 }
@@ -174,13 +188,85 @@ func (a *PackageArtifact) SetPath(p string) {
 }
 
 // Compress Archives and compress (TODO) to the artifact path
-func (a *PackageArtifact) Compress(src string) error {
-	return helpers.Tar(src, a.Path)
+func (a *PackageArtifact) Compress(src string, concurrency int) error {
+	switch a.CompressionType {
+	case None:
+		return helpers.Tar(src, a.Path)
+
+	case GZip:
+		err := helpers.Tar(src, a.Path)
+		if err != nil {
+			return err
+		}
+		original, err := os.Open(a.Path)
+		if err != nil {
+			return err
+		}
+		defer original.Close()
+
+		gzipfile := a.Path + ".gz"
+		bufferedReader := bufio.NewReader(original)
+
+		// Open a file for writing.
+		dst, err := os.Create(gzipfile)
+		if err != nil {
+			return err
+		}
+		// Create gzip writer.
+		w := gzip.NewWriter(dst)
+		w.SetConcurrency(concurrency, 10)
+		defer w.Close()
+		defer dst.Close()
+		_, err = io.Copy(w, bufferedReader)
+		if err != nil {
+			return err
+		}
+		w.Close()
+		os.RemoveAll(a.Path) // Remove original
+		a.Path = gzipfile
+	}
+	return errors.New("Compression type must be supplied")
 }
 
 // Unpack Untar and decompress (TODO) to the given path
 func (a *PackageArtifact) Unpack(dst string, keepPerms bool) error {
-	return helpers.Untar(a.GetPath(), dst, keepPerms)
+	switch a.CompressionType {
+	case None:
+		return helpers.Untar(a.GetPath(), dst, keepPerms)
+
+	case GZip:
+		// Create the uncompressed archive
+		archive, err := os.Create(a.GetPath() + ".tar")
+		if err != nil {
+			return err
+		}
+		defer os.RemoveAll(a.GetPath() + ".tar")
+
+		original, err := os.Open(a.Path)
+		if err != nil {
+			return err
+		}
+		defer original.Close()
+
+		bufferedReader := bufio.NewReader(original)
+		r, err := gzip.NewReader(bufferedReader)
+		if err != nil {
+			return err
+		}
+		defer r.Close()
+
+		_, err = io.Copy(archive, r)
+		if err != nil {
+			return err
+		}
+
+		err = helpers.Untar(a.GetPath()+".tar", dst, keepPerms)
+		if err != nil {
+			return err
+		}
+
+	}
+	return errors.New("Compression type must be supplied")
 }
 
 func (a *PackageArtifact) FileList() ([]string, error) {
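As committed, both switch statements fall through to the trailing errors.New("Compression type must be supplied") even when a case succeeds: the GZip branch of Compress never returns after a.Path = gzipfile, and the GZip branch of Unpack never returns after a successful Untar, so both report an error on success. The w.SetConcurrency(concurrency, 10) call also passes the worker count where pgzip expects a block size in bytes. A corrected sketch of the gzip compress path, not the committed code — same logic, with explicit returns per case and pgzip's documented argument order (1 MiB block size is an assumption, any reasonable size works); the analogous return fix applies to Unpack:

	func (a *PackageArtifact) Compress(src string, concurrency int) error {
		switch a.CompressionType {
		case None:
			return helpers.Tar(src, a.Path)
		case GZip:
			if err := helpers.Tar(src, a.Path); err != nil {
				return err
			}
			original, err := os.Open(a.Path)
			if err != nil {
				return err
			}
			defer original.Close()

			gzipfile := a.Path + ".gz"
			dst, err := os.Create(gzipfile)
			if err != nil {
				return err
			}
			defer dst.Close()

			w := gzip.NewWriter(dst)
			// Block size in bytes first, then the number of parallel blocks.
			if err := w.SetConcurrency(1<<20, concurrency); err != nil {
				return err
			}
			if _, err := io.Copy(w, bufio.NewReader(original)); err != nil {
				w.Close()
				return err
			}
			if err := w.Close(); err != nil {
				return err
			}
			os.RemoveAll(a.Path) // drop the uncompressed tar
			a.Path = gzipfile
			return nil // missing in the committed code: without it the error below is returned
		}
		return errors.New("Compression type must be supplied")
	}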
@@ -242,7 +328,7 @@ func worker(i int, wg *sync.WaitGroup, s <-chan CopyJob) {
 }
 
 // ExtractArtifactFromDelta extracts deltas from ArtifactLayer from an image in tar format
-func ExtractArtifactFromDelta(src, dst string, layers []ArtifactLayer, concurrency int, keepPerms bool, includes []string) (Artifact, error) {
+func ExtractArtifactFromDelta(src, dst string, layers []ArtifactLayer, concurrency int, keepPerms bool, includes []string, t CompressionImplementation) (Artifact, error) {
 
 	archive, err := ioutil.TempDir(os.TempDir(), "archive")
 	if err != nil {
@@ -306,10 +392,11 @@ func ExtractArtifactFromDelta(src, dst string, layers []ArtifactLayer, concurren
 
 	close(toCopy)
 	wg.Wait()
 
-	err = helpers.Tar(archive, dst)
+	a := NewPackageArtifact(dst)
+	a.SetCompressionType(t)
+	err = a.Compress(archive, concurrency)
 	if err != nil {
 		return nil, errors.Wrap(err, "Error met while creating package archive")
 	}
-	return NewPackageArtifact(dst), nil
+	return a, nil
 }
@@ -21,6 +21,7 @@ import (
 	"os"
 	"path/filepath"
 	"regexp"
+	"runtime"
 	"strings"
 	"sync"
 
@@ -40,6 +41,8 @@ type LuetCompiler struct {
 	Database           pkg.PackageDatabase
 	ImageRepository    string
 	PullFirst, KeepImg bool
+	Concurrency        int
+	CompressionType    CompressionImplementation
 }
 
 func NewLuetCompiler(backend CompilerBackend, db pkg.PackageDatabase) Compiler {
@@ -52,10 +55,16 @@ func NewLuetCompiler(backend CompilerBackend, db pkg.PackageDatabase) Compiler {
 		Database:        db,
 		ImageRepository: "luet/cache",
 		PullFirst:       true,
+		CompressionType: None,
 		KeepImg:         true,
+		Concurrency:     runtime.NumCPU(),
 	}
 }
 
+func (cs *LuetCompiler) SetConcurrency(i int) {
+	cs.Concurrency = i
+}
+
 func (cs *LuetCompiler) compilerWorker(i int, wg *sync.WaitGroup, cspecs chan CompilationSpec, a *[]Artifact, m *sync.Mutex, concurrency int, keepPermissions bool, errors chan error) {
 	defer wg.Done()
 
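With these defaults, every compiler starts with plain-tar output and a notional worker per CPU. Note that in this commit only SetConcurrency is exposed on the Compiler interface (see the interface hunk further down); switching compression still means reaching into the concrete struct. A hypothetical wiring sketch, assuming NewLuetCompiler returns the usual *LuetCompiler concrete type (backend and db come from elsewhere in luet):

	c := NewLuetCompiler(backend, db)
	c.SetConcurrency(4) // via the Compiler interface added below

	// No interface-level compression setter exists yet in this commit,
	// so the field is set on the concrete type.
	if cs, ok := c.(*LuetCompiler); ok {
		cs.CompressionType = GZip
	}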
@@ -70,6 +79,7 @@ func (cs *LuetCompiler) compilerWorker(i int, wg *sync.WaitGroup, cspecs chan Co
 		m.Unlock()
 	}
 }
 
 func (cs *LuetCompiler) CompileWithReverseDeps(concurrency int, keepPermissions bool, ps CompilationSpecs) ([]Artifact, []error) {
 	artifacts, err := cs.CompileParallel(concurrency, keepPermissions, ps)
 	if len(err) != 0 {
@@ -340,7 +350,8 @@ func (cs *LuetCompiler) compileWithImage(image, buildertaggedImage, packageImage
 			cs.stripIncludesFromRootfs(p.GetIncludes(), rootfs)
 		}
 		artifact = NewPackageArtifact(p.Rel(p.GetPackage().GetFingerPrint() + ".package.tar"))
-		err = artifact.Compress(rootfs)
+		artifact.SetCompressionType(cs.CompressionType)
+		err = artifact.Compress(rootfs, concurrency)
 		if err != nil {
 			return nil, errors.Wrap(err, "Error met while creating package archive")
 		}
@@ -349,10 +360,11 @@ func (cs *LuetCompiler) compileWithImage(image, buildertaggedImage, packageImage
 	} else {
 		Info(pkgTag, "Generating delta")
 
-		artifact, err = ExtractArtifactFromDelta(rootfs, p.Rel(p.GetPackage().GetFingerPrint()+".package.tar"), diffs, concurrency, keepPermissions, p.GetIncludes())
+		artifact, err = ExtractArtifactFromDelta(rootfs, p.Rel(p.GetPackage().GetFingerPrint()+".package.tar"), diffs, concurrency, keepPermissions, p.GetIncludes(), cs.CompressionType)
 		if err != nil {
 			return nil, errors.Wrap(err, "Could not generate deltas")
 		}
 
 		artifact.SetCompileSpec(p)
 	}
 
@@ -365,7 +377,7 @@ func (cs *LuetCompiler) compileWithImage(image, buildertaggedImage, packageImage
 	return artifact, nil
 }
 
-func (cs *LuetCompiler) packageFromImage(p CompilationSpec, tag string, keepPermissions, keepImg bool) (Artifact, error) {
+func (cs *LuetCompiler) packageFromImage(p CompilationSpec, tag string, keepPermissions, keepImg bool, concurrency int) (Artifact, error) {
 	pkgTag := ":package: " + p.GetPackage().GetName()
 
 	Info(pkgTag, " 🍩 Build starts 🔨 🔨 🔨 ")
@@ -405,8 +417,9 @@ func (cs *LuetCompiler) packageFromImage(p CompilationSpec, tag string, keepPerm
 	}
 	artifact := NewPackageArtifact(p.Rel(p.GetPackage().GetFingerPrint() + ".package.tar"))
 	artifact.SetCompileSpec(p)
+	artifact.SetCompressionType(cs.CompressionType)
 
-	err = artifact.Compress(rootfs)
+	err = artifact.Compress(rootfs, concurrency)
 	if err != nil {
 		return nil, errors.Wrap(err, "Error met while creating package archive")
 	}
@@ -480,7 +493,7 @@ func (cs *LuetCompiler) compile(concurrency int, keepPermissions bool, p Compila
 	// Treat last case (easier) first. The image is provided and we just compute a plain dockerfile with the images listed as above
 	if p.GetImage() != "" {
 		if p.ImageUnpack() { // If it is just an entire image, create a package from it
-			return cs.packageFromImage(p, "", keepPermissions, cs.KeepImg)
+			return cs.packageFromImage(p, "", keepPermissions, cs.KeepImg, concurrency)
 		}
 
 		return cs.compileWithImage(p.GetImage(), "", "", concurrency, keepPermissions, cs.KeepImg, p)
@@ -527,7 +540,7 @@ func (cs *LuetCompiler) compile(concurrency int, keepPermissions bool, p Compila
 			return nil, errors.New("No image defined for package: " + assertion.Package.GetName())
 		}
 		Info(pkgTag, ":whale: Sourcing package from image", compileSpec.GetImage())
-		artifact, err := cs.packageFromImage(compileSpec, currentPackageImageHash, keepPermissions, cs.KeepImg)
+		artifact, err := cs.packageFromImage(compileSpec, currentPackageImageHash, keepPermissions, cs.KeepImg, concurrency)
 		if err != nil {
 			return nil, errors.Wrap(err, "Failed compiling "+compileSpec.GetPackage().GetName())
 		}
@@ -25,7 +25,7 @@ type Compiler interface {
 	CompileParallel(concurrency int, keepPermissions bool, ps CompilationSpecs) ([]Artifact, []error)
 	CompileWithReverseDeps(concurrency int, keepPermissions bool, ps CompilationSpecs) ([]Artifact, []error)
 	ComputeDepTree(p CompilationSpec) (solver.PackagesAssertions, error)
-
+	SetConcurrency(i int)
 	FromPackage(pkg.Package) (CompilationSpec, error)
 
 	SetBackend(CompilerBackend)
@@ -63,7 +63,8 @@ type Artifact interface {
 	GetCompileSpec() CompilationSpec
 	WriteYaml(dst string) error
 	Unpack(dst string, keepPerms bool) error
-	Compress(src string) error
+	Compress(src string, concurrency int) error
+	SetCompressionType(t CompressionImplementation)
 	FileList() ([]string, error)
 	Hash() error
 	Verify() error
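Artifact grows two methods here, so any other implementation of the interface has to be updated in lockstep. A cheap way to catch a drifting implementation at compile time is an interface-satisfaction assertion next to the struct (a common Go idiom, not part of this commit):

	// Fails to compile if *PackageArtifact stops satisfying Artifact,
	// e.g. when Compress's signature changes again.
	var _ Artifact = (*PackageArtifact)(nil)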