// Copyright © 2019 Ettore Di Giacinto <mudler@gentoo.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package compiler

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"regexp"
	"runtime"
	"strings"
	"sync"

	"github.com/mudler/luet/pkg/helpers"
	. "github.com/mudler/luet/pkg/logger"
	pkg "github.com/mudler/luet/pkg/package"
	"github.com/mudler/luet/pkg/solver"
	"github.com/mudler/luet/pkg/tree"

	"github.com/pkg/errors"
)
const BuildFile = "build.yaml"

type LuetCompiler struct {
	*tree.CompilerRecipe
	Backend            CompilerBackend
	Database           pkg.PackageDatabase
	ImageRepository    string
	PullFirst, KeepImg bool
	Concurrency        int
	CompressionType    CompressionImplementation
}
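
// NewLuetCompiler returns a Compiler that builds compilation specs through the
// given container backend, using db as the package definition database.
// Defaults: images are tagged under the "luet/cache" repository, missing images
// are pulled first, produced images are kept, artifacts are not compressed and
// concurrency defaults to the number of CPUs.
//
// Minimal usage sketch. The backend constructor and the db/somePackage values
// below are placeholders/assumptions (not defined in this file), and it assumes
// FromPackage/Compile are exposed on the Compiler interface:
//
//	c := NewLuetCompiler(backend.NewSimpleDockerBackend(), db)
//	spec, err := c.FromPackage(somePackage)
//	if err != nil {
//		// handle error
//	}
//	artifact, err := c.Compile(4, true, spec)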
func NewLuetCompiler(backend CompilerBackend, db pkg.PackageDatabase) Compiler {
	// The CompilerRecipe gives us a tree with only the build deps listed.
	return &LuetCompiler{
		Backend: backend,
		CompilerRecipe: &tree.CompilerRecipe{
			tree.Recipe{Database: db},
		},
		Database:        db,
		ImageRepository: "luet/cache",
		PullFirst:       true,
		CompressionType: None,
		KeepImg:         true,
		Concurrency:     runtime.NumCPU(),
	}
}
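
// SetConcurrency sets the number of concurrent operations the compiler uses.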
func (cs *LuetCompiler) SetConcurrency(i int) {
	cs.Concurrency = i
}
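
// compilerWorker is run as a goroutine by CompileParallel: it consumes specs
// from cspecs, compiles each one, appends the resulting artifact to a under the
// shared mutex, and reports failures on the errors channel.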
func (cs *LuetCompiler) compilerWorker(i int, wg *sync.WaitGroup, cspecs chan CompilationSpec, a *[]Artifact, m *sync.Mutex, concurrency int, keepPermissions bool, errors chan error) {
	defer wg.Done()

	for s := range cspecs {
		ar, err := cs.compile(concurrency, keepPermissions, s)
		if err != nil {
			errors <- err
		}

		m.Lock()
		*a = append(*a, ar)
		m.Unlock()
	}
}
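
// CompileWithReverseDeps compiles the given specs and then also rebuilds every
// package that reverse-depends on one of the produced artifacts, skipping the
// specs that were already requested.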
func (cs *LuetCompiler) CompileWithReverseDeps(concurrency int, keepPermissions bool, ps CompilationSpecs) ([]Artifact, []error) {
	artifacts, err := cs.CompileParallel(concurrency, keepPermissions, ps)
	if len(err) != 0 {
		return artifacts, err
	}

	Info(":ant: Resolving reverse dependencies")
	toCompile := NewLuetCompilationspecs()
	for _, a := range artifacts {

		revdeps := a.GetCompileSpec().GetPackage().Revdeps(cs.Database)
		for _, r := range revdeps {
			spec, asserterr := cs.FromPackage(r)
			if asserterr != nil {
				return nil, append(err, asserterr)
			}
			spec.SetOutputPath(ps.All()[0].GetOutputPath())

			toCompile.Add(spec)
		}
		// for _, assertion := range a.GetSourceAssertion() {
		// 	if assertion.Value && assertion.Package.Flagged() {
		// 		spec, asserterr := cs.FromPackage(assertion.Package)
		// 		if err != nil {
		// 			return nil, append(err, asserterr)
		// 		}
		// 		w, asserterr := cs.Tree().World()
		// 		if err != nil {
		// 			return nil, append(err, asserterr)
		// 		}
		// 		revdeps := spec.GetPackage().Revdeps(&w)
		// 		for _, r := range revdeps {
		// 			spec, asserterr := cs.FromPackage(r)
		// 			if asserterr != nil {
		// 				return nil, append(err, asserterr)
		// 			}
		// 			spec.SetOutputPath(ps.All()[0].GetOutputPath())
		// 			toCompile.Add(spec)
		// 		}
		// 	}
		// }
	}

	uniques := toCompile.Unique().Remove(ps)
	for _, u := range uniques.All() {
		Info(" :arrow_right_hook:", u.GetPackage().GetName(), ":leaves:", u.GetPackage().GetVersion(), "(", u.GetPackage().GetCategory(), ")")
	}

	artifacts2, err := cs.CompileParallel(concurrency, keepPermissions, uniques)
	return append(artifacts, artifacts2...), err
}
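
// CompileParallel compiles the given specs with a pool of `concurrency`
// workers, computing the dependency tree of each spec before dispatching it.
// It returns all produced artifacts together with any errors collected from
// the workers.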
func (cs *LuetCompiler) CompileParallel(concurrency int, keepPermissions bool, ps CompilationSpecs) ([]Artifact, []error) {
	Spinner(22)
	defer SpinnerStop()

	all := make(chan CompilationSpec)
	artifacts := []Artifact{}
	mutex := &sync.Mutex{}
	errors := make(chan error, ps.Len())

	var wg = new(sync.WaitGroup)
	for i := 0; i < concurrency; i++ {
		wg.Add(1)
		go cs.compilerWorker(i, wg, all, &artifacts, mutex, concurrency, keepPermissions, errors)
	}

	for _, p := range ps.All() {
		asserts, err := cs.ComputeDepTree(p)
		if err != nil {
			panic(err)
		}
		p.SetSourceAssertion(asserts)
		all <- p
	}

	close(all)
	wg.Wait()
	close(errors)

	var allErrors []error
	for e := range errors {
		allErrors = append(allErrors, e)
	}

	return artifacts, allErrors
}
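
// stripIncludesFromRootfs removes from rootfs every path that does not match
// at least one of the given include regular expressions.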
func (cs *LuetCompiler) stripIncludesFromRootfs(includes []string, rootfs string) error {
	var includeRegexp []*regexp.Regexp
	for _, i := range includes {
		r, e := regexp.Compile(i)
		if e != nil {
			return errors.Wrap(e, "Could not compile regex in the include of the package")
		}
		includeRegexp = append(includeRegexp, r)
	}

	toRemove := []string{}

	// the function that handles each file or dir
	var ff = func(currentpath string, info os.FileInfo, err error) error {
		// if info.Name() != DefinitionFile {
		// 	return nil // Skip with no errors
		// }
		if currentpath == rootfs {
			return nil
		}

		abspath := strings.ReplaceAll(currentpath, rootfs, "")

		match := false
		for _, i := range includeRegexp {
			if i.MatchString(abspath) {
				match = true
			}
		}

		if !match {
			toRemove = append(toRemove, currentpath)
		}

		return nil
	}

	err := filepath.Walk(rootfs, ff)
	if err != nil {
		return err
	}

	for _, s := range toRemove {
		e := os.RemoveAll(s)
		if e != nil {
			Warning("Failed removing", s, e.Error())
			return e
		}
	}
	return nil
}
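
// compileWithImage builds a package starting from a seed image: it first
// builds a "builder" image containing the sources and build dependencies, then
// a "step" image on top of it that runs the build, extracts the resulting
// rootfs and either packages the whole rootfs (unpacked specs) or only the
// delta between the two images into the package tarball.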
func (cs *LuetCompiler) compileWithImage(image, buildertaggedImage, packageImage string, concurrency int, keepPermissions, keepImg bool, p CompilationSpec) (Artifact, error) {
	pkgTag := ":package: " + p.GetPackage().GetName()

	// In this case, we ignore the build deps as we suppose that the image has them -
	// otherwise we recompose the tree with a solver, and we build all the images first.
	p.SetSeedImage(image)

	err := os.MkdirAll(p.Rel("build"), os.ModePerm)
	if err != nil {
		return nil, errors.Wrap(err, "Error met while creating tempdir for building")
	}
	buildDir, err := ioutil.TempDir(p.Rel("build"), "pack")
	if err != nil {
		return nil, errors.Wrap(err, "Error met while creating tempdir for building")
	}
	defer os.RemoveAll(buildDir) // clean up

	// First we copy the source definitions into the output - we create a copy which the builds will need (we need to cache this phase somehow)
	err = helpers.CopyDir(p.GetPackage().GetPath(), buildDir)
	if err != nil {
		return nil, errors.Wrap(err, "Could not copy package sources")
	}

	if buildertaggedImage == "" {
		buildertaggedImage = cs.ImageRepository + "-" + p.GetPackage().GetFingerPrint() + "-builder"
	}
	if packageImage == "" {
		packageImage = cs.ImageRepository + "-" + p.GetPackage().GetFingerPrint()
	}

	if cs.PullFirst {
		// Best effort pull
		cs.Backend.DownloadImage(CompilerBackendOptions{ImageName: buildertaggedImage})
		cs.Backend.DownloadImage(CompilerBackendOptions{ImageName: packageImage})
	}

	Info(pkgTag, "Generating :whale: definition for builder image from", image)

	// First we create the builder image
	p.WriteBuildImageDefinition(filepath.Join(buildDir, p.GetPackage().GetFingerPrint()+"-builder.dockerfile"))
	builderOpts := CompilerBackendOptions{
		ImageName:      buildertaggedImage,
		SourcePath:     buildDir,
		DockerFileName: p.GetPackage().GetFingerPrint() + "-builder.dockerfile",
		Destination:    p.Rel(p.GetPackage().GetFingerPrint() + "-builder.image.tar"),
	}

	err = cs.Backend.BuildImage(builderOpts)
	if err != nil {
		return nil, errors.Wrap(err, "Could not build image: "+image+" "+builderOpts.DockerFileName)
	}

	err = cs.Backend.ExportImage(builderOpts)
	if err != nil {
		return nil, errors.Wrap(err, "Could not export image")
	}

	// Then we write the step image, which uses the builder one
	p.WriteStepImageDefinition(buildertaggedImage, filepath.Join(buildDir, p.GetPackage().GetFingerPrint()+".dockerfile"))
	runnerOpts := CompilerBackendOptions{
		ImageName:      packageImage,
		SourcePath:     buildDir,
		DockerFileName: p.GetPackage().GetFingerPrint() + ".dockerfile",
		Destination:    p.Rel(p.GetPackage().GetFingerPrint() + ".image.tar"),
	}

	// if !keepPackageImg {
	// 	err = cs.Backend.ImageDefinitionToTar(runnerOpts)
	// 	if err != nil {
	// 		return nil, errors.Wrap(err, "Could not export image to tar")
	// 	}
	// } else {
	if err := cs.Backend.BuildImage(runnerOpts); err != nil {
		return nil, errors.Wrap(err, "Failed building image for "+runnerOpts.ImageName+" "+runnerOpts.DockerFileName)
	}
	if err := cs.Backend.ExportImage(runnerOpts); err != nil {
		return nil, errors.Wrap(err, "Failed exporting image")
	}
	// }
	var diffs []ArtifactLayer
	var artifact Artifact

	if !p.ImageUnpack() {
		// we have to get diffs only if spec is not unpacked
		diffs, err = cs.Backend.Changes(p.Rel(p.GetPackage().GetFingerPrint()+"-builder.image.tar"), p.Rel(p.GetPackage().GetFingerPrint()+".image.tar"))
		if err != nil {
			return nil, errors.Wrap(err, "Could not generate changes from layers")
		}
	}

	rootfs, err := ioutil.TempDir(p.GetOutputPath(), "rootfs")
	if err != nil {
		return nil, errors.Wrap(err, "Could not create tempdir")
	}
	defer os.RemoveAll(rootfs) // clean up

	// TODO: Compression and such
	err = cs.Backend.ExtractRootfs(CompilerBackendOptions{
		ImageName:  packageImage,
		SourcePath: runnerOpts.Destination, Destination: rootfs}, keepPermissions)
	if err != nil {
		return nil, errors.Wrap(err, "Could not extract rootfs")
	}
	if !keepImg {
		// We keep them around, so to not reload them from the tar (which should be the "correct way") and we automatically share the same layers
		// TODO: Handle caching and optionally do not remove things
		err = cs.Backend.RemoveImage(builderOpts)
		if err != nil {
			// TODO: Have a --fatal flag which enables Warnings to exit.
			Warning("Could not remove image ", builderOpts.ImageName)
			// return nil, errors.Wrap(err, "Could not remove image")
		}
		err = cs.Backend.RemoveImage(runnerOpts)
		if err != nil {
			// TODO: Have a --fatal flag which enables Warnings to exit.
			Warning("Could not remove image ", runnerOpts.ImageName)
			// return nil, errors.Wrap(err, "Could not remove image")
		}
	}
	if p.ImageUnpack() {
		if len(p.GetIncludes()) > 0 {
			// strip from includes
			cs.stripIncludesFromRootfs(p.GetIncludes(), rootfs)
		}
		artifact = NewPackageArtifact(p.Rel(p.GetPackage().GetFingerPrint() + ".package.tar"))
		artifact.SetCompressionType(cs.CompressionType)

		err = artifact.Compress(rootfs, concurrency)
		if err != nil {
			return nil, errors.Wrap(err, "Error met while creating package archive")
		}

		artifact.SetCompileSpec(p)
	} else {
		Info(pkgTag, "Generating delta")

		artifact, err = ExtractArtifactFromDelta(rootfs, p.Rel(p.GetPackage().GetFingerPrint()+".package.tar"), diffs, concurrency, keepPermissions, p.GetIncludes(), cs.CompressionType)
		if err != nil {
			return nil, errors.Wrap(err, "Could not generate deltas")
		}

		artifact.SetCompileSpec(p)
	}

	err = artifact.WriteYaml(p.GetOutputPath())
	if err != nil {
		return artifact, err
	}

	Info(pkgTag, " :white_check_mark: Done")

	return artifact, nil
}
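
// packageFromImage creates a package artifact directly from an existing image:
// the image is pulled (and optionally retagged as tag), its rootfs is
// extracted and compressed as-is into the package tarball, and the artifact
// metadata is written alongside it.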
func (cs *LuetCompiler) packageFromImage(p CompilationSpec, tag string, keepPermissions, keepImg bool, concurrency int) (Artifact, error) {
	pkgTag := ":package: " + p.GetPackage().GetName()

	Info(pkgTag, " 🍩 Build starts 🔨 🔨 🔨 ")

	builderOpts := CompilerBackendOptions{
		ImageName:   p.GetImage(),
		Destination: p.Rel(p.GetPackage().GetFingerPrint() + ".image.tar"),
	}

	err := cs.Backend.DownloadImage(builderOpts)
	if err != nil {
		return nil, errors.Wrap(err, "Could not download image")
	}

	if tag != "" {
		err = cs.Backend.CopyImage(p.GetImage(), tag)
		if err != nil {
			return nil, errors.Wrap(err, "Could not copy image")
		}
	}

	err = cs.Backend.ExportImage(builderOpts)
	if err != nil {
		return nil, errors.Wrap(err, "Could not export image")
	}

	rootfs, err := ioutil.TempDir(p.GetOutputPath(), "rootfs")
	if err != nil {
		return nil, errors.Wrap(err, "Could not create tempdir")
	}
	defer os.RemoveAll(rootfs) // clean up

	// TODO: Compression and such
	err = cs.Backend.ExtractRootfs(CompilerBackendOptions{
		ImageName:  p.GetImage(),
		SourcePath: builderOpts.Destination, Destination: rootfs}, keepPermissions)
	if err != nil {
		return nil, errors.Wrap(err, "Could not extract rootfs")
	}

	artifact := NewPackageArtifact(p.Rel(p.GetPackage().GetFingerPrint() + ".package.tar"))
	artifact.SetCompileSpec(p)
	artifact.SetCompressionType(cs.CompressionType)

	err = artifact.Compress(rootfs, concurrency)
	if err != nil {
		return nil, errors.Wrap(err, "Error met while creating package archive")
	}

	if !keepImg {
		// We keep them around, so to not reload them from the tar (which should be the "correct way") and we automatically share the same layers
		// TODO: Handle caching and optionally do not remove things
		err = cs.Backend.RemoveImage(builderOpts)
		if err != nil {
			// TODO: Have a --fatal flag which enables Warnings to exit.
			Warning("Could not remove image ", builderOpts.ImageName)
			// return nil, errors.Wrap(err, "Could not remove image")
		}
	}

	Info(pkgTag, " :white_check_mark: Done")

	err = artifact.WriteYaml(p.GetOutputPath())
	if err != nil {
		return artifact, err
	}

	return artifact, nil
}
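
// ComputeDepTree resolves the build dependency tree of p with the solver,
// annotates each assertion with the hashes used to name the builder and
// package images, and stores the resulting assertions on the spec.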
func (cs *LuetCompiler) ComputeDepTree(p CompilationSpec) (solver.PackagesAssertions, error) {
	s := solver.NewSolver(pkg.NewInMemoryDatabase(false), cs.Database, pkg.NewInMemoryDatabase(false))

	solution, err := s.Install([]pkg.Package{p.GetPackage()})
	if err != nil {
		return nil, errors.Wrap(err, "While computing a solution for "+p.GetPackage().GetName())
	}

	dependencies := solution.Order(cs.Database, p.GetPackage().GetFingerPrint())

	assertions := solver.PackagesAssertions{}
	for _, assertion := range dependencies { // highly dependent on the order
		if assertion.Value {
			nthsolution := dependencies.Cut(assertion.Package)
			assertion.Hash = solver.PackageHash{
				BuildHash:   nthsolution.Drop(assertion.Package).AssertionHash(),
				PackageHash: nthsolution.AssertionHash(),
			}
			assertions = append(assertions, assertion)
		}
	}
	p.SetSourceAssertion(assertions)
	return assertions, nil
}
// Compile is the non-parallel entry point: it computes the dependency tree for
// the spec and then builds it.
func (cs *LuetCompiler) Compile(concurrency int, keepPermissions bool, p CompilationSpec) (Artifact, error) {
	asserts, err := cs.ComputeDepTree(p)
	if err != nil {
		panic(err)
	}
	p.SetSourceAssertion(asserts)
	return cs.compile(concurrency, keepPermissions, p)
}
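
// compile builds a single spec: if a seed image is set it packages or builds
// from that image directly; otherwise it builds every dependency image in
// assertion order and then builds the package on top of the last one.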
func (cs *LuetCompiler) compile(concurrency int, keepPermissions bool, p CompilationSpec) (Artifact, error) {
	Info(":package: Compiling", p.GetPackage().GetName(), "version", p.GetPackage().GetVersion(), ".... :coffee:")

	if len(p.GetPackage().GetRequires()) == 0 && p.GetImage() == "" {
		Error("Package with no deps and no seed image supplied, bailing out")
		return nil, errors.New("Package " + p.GetPackage().GetFingerPrint() + " with no deps and no seed image supplied, bailing out")
	}

	// - If image is set we just generate a plain dockerfile.
	// Treat the last case (easier) first: the image is provided and we just compute a plain dockerfile with the images listed as above.
	if p.GetImage() != "" {
		if p.ImageUnpack() { // If it is just an entire image, create a package from it
			return cs.packageFromImage(p, "", keepPermissions, cs.KeepImg, concurrency)
		}

		return cs.compileWithImage(p.GetImage(), "", "", concurrency, keepPermissions, cs.KeepImg, p)
	}

	// - If image is not set, we read a base_image. Then we will build one image from it to kick off our build based
	//   on how we compute the resolvable tree.
	//   This means recursively building all the build-images needed to reach that tree part.
	// - We later compute a hash used to identify the image, so each similar deptree keeps the same build image.

	dependencies := p.GetSourceAssertion().Drop(p.GetPackage()) // at this point we should have a flattened list of deps to build, including all of them (with all constraints propagated already)
	departifacts := []Artifact{}                                // TODO: Return this somehow
	var lastHash string
	depsN := 0
	currentN := 0

	Info(":deciduous_tree: Build dependencies for " + p.GetPackage().GetName())
	for _, assertion := range dependencies { // highly dependent on the order
		depsN++
		Info(" :arrow_right_hook:", assertion.Package.GetName(), ":leaves:", assertion.Package.GetVersion(), "(", assertion.Package.GetCategory(), ")")
	}

	for _, assertion := range dependencies { // highly dependent on the order
		currentN++
		pkgTag := fmt.Sprintf(":package: %d/%d %s ⤑ %s", currentN, depsN, p.GetPackage().GetName(), assertion.Package.GetName())
		Info(pkgTag, " :zap: Building dependency")
		compileSpec, err := cs.FromPackage(assertion.Package)
		if err != nil {
			return nil, errors.Wrap(err, "Error while generating compilespec for "+assertion.Package.GetName())
		}
		compileSpec.SetOutputPath(p.GetOutputPath())

		buildImageHash := cs.ImageRepository + ":" + assertion.Hash.BuildHash
		currentPackageImageHash := cs.ImageRepository + ":" + assertion.Hash.PackageHash
		Debug(pkgTag, " :arrow_right_hook: :whale: Builder image from", buildImageHash)
		Debug(pkgTag, " :arrow_right_hook: :whale: Package image name", currentPackageImageHash)

		lastHash = currentPackageImageHash
		if compileSpec.GetImage() != "" {
			// TODO: Refactor this
			if compileSpec.ImageUnpack() { // If it is just an entire image, create a package from it
				if compileSpec.GetImage() == "" {
					return nil, errors.New("No image defined for package: " + assertion.Package.GetName())
				}

				Info(pkgTag, ":whale: Sourcing package from image", compileSpec.GetImage())
				artifact, err := cs.packageFromImage(compileSpec, currentPackageImageHash, keepPermissions, cs.KeepImg, concurrency)
				if err != nil {
					return nil, errors.Wrap(err, "Failed compiling "+compileSpec.GetPackage().GetName())
				}
				departifacts = append(departifacts, artifact)
				continue
			}

			Debug(pkgTag, " :wrench: Compiling "+compileSpec.GetPackage().GetFingerPrint()+" from image")
			artifact, err := cs.compileWithImage(compileSpec.GetImage(), buildImageHash, currentPackageImageHash, concurrency, keepPermissions, cs.KeepImg, compileSpec)
			if err != nil {
				return nil, errors.Wrap(err, "Failed compiling "+compileSpec.GetPackage().GetName())
			}
			departifacts = append(departifacts, artifact)
			Info(pkgTag, ":white_check_mark: Done")
			continue
		}

		Debug(pkgTag, " :wrench: Compiling "+compileSpec.GetPackage().GetFingerPrint()+" from tree")
		artifact, err := cs.compileWithImage(buildImageHash, "", currentPackageImageHash, concurrency, keepPermissions, cs.KeepImg, compileSpec)
		if err != nil {
			return nil, errors.Wrap(err, "Failed compiling "+compileSpec.GetPackage().GetName())
			// deperrs = append(deperrs, err)
			// break // stop at first error
		}
		departifacts = append(departifacts, artifact)
		Info(pkgTag, ":collision: Done")
	}

	Info(":package:", p.GetPackage().GetName(), ":cyclone: Building package target from:", lastHash)
	artifact, err := cs.compileWithImage(lastHash, "", "", concurrency, keepPermissions, cs.KeepImg, p)
	if err != nil {
		return artifact, err
	}

	artifact.SetDependencies(departifacts)
	artifact.SetSourceAssertion(p.GetSourceAssertion())

	return artifact, err
}
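
// FromPackage returns the compilation spec for a package by locating its
// candidate in the database and parsing the build.yaml next to its definition.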
func (cs *LuetCompiler) FromPackage(p pkg.Package) (CompilationSpec, error) {
	pack, err := cs.Database.FindPackageCandidate(p)
	if err != nil {
		return nil, err
	}

	buildFile := pack.Rel(BuildFile)
	if !helpers.Exists(buildFile) {
		return nil, errors.New("No build file present for " + p.GetFingerPrint())
	}

	dat, err := ioutil.ReadFile(buildFile)
	if err != nil {
		return nil, err
	}
	return NewLuetCompilationSpec(dat, pack)
}
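
// GetBackend returns the container backend used to build and export images.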
func (cs *LuetCompiler) GetBackend() CompilerBackend {
	return cs.Backend
}
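
// SetBackend sets the container backend used to build and export images.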
func (cs *LuetCompiler) SetBackend(b CompilerBackend) {
	cs.Backend = b
}