Mirror of https://github.com/mudler/luet.git

Compare commits
8 commits: 58e857e700, 77c7eab1ee, 6bf91a0b60, c6170fabd6, 18881c3283, 9da675c12e, 82f339f493, d5138a6c0b

cmd/build.go (19 lines changed)
@@ -114,15 +114,18 @@ Build packages specifying multiple definition trees:
pushFinalImages, _ := cmd.Flags().GetBool("push-final-images")
pushFinalImagesRepository, _ := cmd.Flags().GetString("push-final-images-repository")
pushFinalImagesForce, _ := cmd.Flags().GetBool("push-final-images-force")
generateImages, _ := cmd.Flags().GetBool("generate-final-images")

var results Results
backendArgs := viper.GetStringSlice("backend-args")
out, _ := cmd.Flags().GetString("output")
pretend, _ := cmd.Flags().GetBool("pretend")
fromRepo, _ := cmd.Flags().GetBool("from-repositories")

compilerSpecs := compilerspec.NewLuetCompilationspecs()

var db types.PackageDatabase
var results Results
var templateFolders []string

compilerBackend, err := compiler.NewBackend(util.DefaultContext, backendType)
helpers.CheckErr(err)
@@ -130,13 +133,17 @@ Build packages specifying multiple definition trees:
db = pkg.NewInMemoryDatabase(false)
defer db.Clean()

runtimeDB := pkg.NewInMemoryDatabase(false)
defer runtimeDB.Clean()

installerRecipe := tree.NewInstallerRecipe(runtimeDB)
generalRecipe := tree.NewCompilerRecipe(db)

for _, src := range treePaths {
util.DefaultContext.Info("Loading tree", src)
helpers.CheckErr(generalRecipe.Load(src))
helpers.CheckErr(installerRecipe.Load(src))
}
templateFolders := []string{}

if fromRepo {
bt, err := installer.LoadBuildTree(generalRecipe, db, util.DefaultContext)
@@ -170,6 +177,7 @@ Build packages specifying multiple definition trees:
options.WithTemplateFolder(templateFolders),
options.WithSolverOptions(opts),
options.Wait(wait),
options.WithRuntimeDatabase(installerRecipe.GetDatabase()),
options.OnlyTarget(onlyTarget),
options.PullFirst(pull),
options.KeepImg(keepImages),
@@ -191,6 +199,10 @@ Build packages specifying multiple definition trees:
}
}

if generateImages {
compileropts = append(compileropts, options.EnableGenerateFinalImages)
}

luetCompiler := compiler.NewLuetCompiler(compilerBackend, generalRecipe.GetDatabase(), compileropts...)

if full {
@@ -238,7 +250,7 @@ Build packages specifying multiple definition trees:
artifact, errs = luetCompiler.CompileWithReverseDeps(privileged, compilerSpecs)

} else if pretend {
toCalculate := []*compilerspec.LuetCompilationSpec{}
var toCalculate []*compilerspec.LuetCompilationSpec
if full {
var err error
toCalculate, err = luetCompiler.ComputeMinimumCompilableSet(compilerSpecs.All()...)
@@ -316,6 +328,7 @@ func init() {
buildCmd.Flags().Bool("revdeps", false, "Build with revdeps")
buildCmd.Flags().Bool("all", false, "Build all specfiles in the tree")

buildCmd.Flags().Bool("generate-final-images", false, "Generate final images while building")
buildCmd.Flags().Bool("push-final-images", false, "Push final images while building")
buildCmd.Flags().Bool("push-final-images-force", false, "Override existing images")
buildCmd.Flags().String("push-final-images-repository", "", "Repository where to push final images to")
@@ -30,7 +30,7 @@ var cfgFile string
var Verbose bool

const (
LuetCLIVersion = "0.30.1"
LuetCLIVersion = "0.31.1"
LuetEnvPrefix = "LUET"
)
@@ -160,7 +160,7 @@ The above keywords cannot be present in the same spec **at the same time**, or t

When specifying `requires_final_images: true` luet builds an artifact for each of the packages listed from their compilation specs and it will later *squash* them together in a new container image which is then used in the build process to create an artifact.

The key difference is about *where* your build is going to run from. By specifying `requires_final_images` it will be constructed a new image with the content of each package - while if setting it to false, it will order the images appropriately and link them together with the Dockerfile `FROM` field. That allows to reuse the same images used to build the packages in the require section - or - create a new one from the result of each package compilation.
The key difference is about *where* your build is going to run from. By specifying `requires_final_images` it will be constructed a new image with the content of each package specified and its dependencies - while if setting it to false, it will order the images appropriately and link them together with the Dockerfile `FROM` field. That allows to reuse the same images used to build the packages in the require section - or - create a new one from the result of each package compilation.

## Keywords

@@ -365,7 +365,7 @@ _since luet>=0.17.0_

(optional) A boolean flag which instruct luet to use the final images in the `requires` field.

By setting `requires_final_images: true` in the compilation spec, packages in the `requires` section will be first compiled, and afterwards the final packages are squashed together in a new image that will be used during build.
By setting `requires_final_images: true` in the compilation spec, packages in the `requires` section and its dependencies will be fetched if available or compiled, and afterwards the result is squashed together in a new image that will be used as a source of the build process of the package.

```yaml
requires:
pkg/api/core/template/functions.go (new file, 186 lines)
@@ -0,0 +1,186 @@
/*
Copyright The Helm Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package template

import (
"bytes"
"encoding/json"
"fmt"
"strings"
"text/template"

"github.com/Masterminds/sprig/v3"
"github.com/pelletier/go-toml"
"github.com/pkg/errors"
"gopkg.in/yaml.v2"
)

const recursionMaxNums = 1000

// funcMap returns a mapping of all of the functions that Engine has.
//
// Because some functions are late-bound (e.g. contain context-sensitive
// data), the functions may not all perform identically outside of an Engine
// as they will inside of an Engine.
//
// Known late-bound functions:
//
// - "include"
// - "tpl"
//
// These are late-bound in Engine.Render(). The
// version included in the FuncMap is a placeholder.
//
func funcMap() template.FuncMap {
f := sprig.TxtFuncMap()

// Add some extra functionality
extra := template.FuncMap{
"toToml": toTOML,
"toYaml": toYAML,
"fromYaml": fromYAML,
"fromYamlArray": fromYAMLArray,
"toJson": toJSON,
"fromJson": fromJSON,
"fromJsonArray": fromJSONArray,

// This is a placeholder for the "include" function, which is
// late-bound to a template. By declaring it here, we preserve the
// integrity of the linter.
"include": func(string, interface{}) string { return "not implemented" },
}

for k, v := range extra {
f[k] = v
}

return f
}

// toYAML takes an interface, marshals it to yaml, and returns a string. It will
// always return a string, even on marshal error (empty string).
//
// This is designed to be called from a template.
func toYAML(v interface{}) string {
data, err := yaml.Marshal(v)
if err != nil {
// Swallow errors inside of a template.
return ""
}
return strings.TrimSuffix(string(data), "\n")
}

// fromYAML converts a YAML document into a map[string]interface{}.
//
// This is not a general-purpose YAML parser, and will not parse all valid
// YAML documents. Additionally, because its intended use is within templates
// it tolerates errors. It will insert the returned error message string into
// m["Error"] in the returned map.
func fromYAML(str string) map[string]interface{} {
m := map[string]interface{}{}

if err := yaml.Unmarshal([]byte(str), &m); err != nil {
m["Error"] = err.Error()
}
return m
}

// fromYAMLArray converts a YAML array into a []interface{}.
//
// This is not a general-purpose YAML parser, and will not parse all valid
// YAML documents. Additionally, because its intended use is within templates
// it tolerates errors. It will insert the returned error message string as
// the first and only item in the returned array.
func fromYAMLArray(str string) []interface{} {
a := []interface{}{}

if err := yaml.Unmarshal([]byte(str), &a); err != nil {
a = []interface{}{err.Error()}
}
return a
}

// toTOML takes an interface, marshals it to toml, and returns a string. It will
// always return a string, even on marshal error (empty string).
//
// This is designed to be called from a template.
func toTOML(v interface{}) string {
b := bytes.NewBuffer(nil)
e := toml.NewEncoder(b)
err := e.Encode(v)
if err != nil {
return err.Error()
}
return b.String()
}

// toJSON takes an interface, marshals it to json, and returns a string. It will
// always return a string, even on marshal error (empty string).
//
// This is designed to be called from a template.
func toJSON(v interface{}) string {
data, err := json.Marshal(v)
if err != nil {
// Swallow errors inside of a template.
return ""
}
return string(data)
}

// fromJSON converts a JSON document into a map[string]interface{}.
//
// This is not a general-purpose JSON parser, and will not parse all valid
// JSON documents. Additionally, because its intended use is within templates
// it tolerates errors. It will insert the returned error message string into
// m["Error"] in the returned map.
func fromJSON(str string) map[string]interface{} {
m := make(map[string]interface{})

if err := json.Unmarshal([]byte(str), &m); err != nil {
m["Error"] = err.Error()
}
return m
}

// fromJSONArray converts a JSON array into a []interface{}.
//
// This is not a general-purpose JSON parser, and will not parse all valid
// JSON documents. Additionally, because its intended use is within templates
// it tolerates errors. It will insert the returned error message string as
// the first and only item in the returned array.
func fromJSONArray(str string) []interface{} {
a := []interface{}{}

if err := json.Unmarshal([]byte(str), &a); err != nil {
a = []interface{}{err.Error()}
}
return a
}

func includeTemplate(tmpl *template.Template, includedNames map[string]int) func(name string, data interface{}) (string, error) {
return func(name string, data interface{}) (string, error) {
var buf strings.Builder
if v, ok := includedNames[name]; ok {
if v > recursionMaxNums {
return "", errors.Wrapf(fmt.Errorf("unable to execute template"), "rendering template has a nested reference name: %s", name)
}
includedNames[name]++
} else {
includedNames[name] = 1
}
err := tmpl.ExecuteTemplate(&buf, name, data)
includedNames[name]--
return buf.String(), err
}
}
@@ -26,14 +26,23 @@ import (
"github.com/pkg/errors"
"gopkg.in/yaml.v2"

"github.com/Masterminds/sprig/v3"
"github.com/imdario/mergo"
)

// String templates a string with the interface
func String(t string, i interface{}) (string, error) {
b := bytes.NewBuffer([]byte{})
tmpl, err := template.New("").Funcs(sprig.TxtFuncMap()).Parse(t)

f := funcMap()

tmpl := template.New("")

includedNames := make(map[string]int)

// Add the 'include' function here so we can close over tmpl.
f["include"] = includeTemplate(tmpl, includedNames)

tmpl, err := tmpl.Funcs(f).Parse(t)
if err != nil {
return "", err
}
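For orientation, here is a minimal, illustrative sketch of how these template additions surface to callers: it renders a string through the package's `String` helper, exercising the new `include` function together with sprig's `indent`. The import path is taken from the file layout above; the template text and values are invented for the example and are not part of this changeset.

```go
package main

import (
	"fmt"

	// Import path assumed from the pkg/api/core/template location above.
	luettemplate "github.com/mudler/luet/pkg/api/core/template"
)

func main() {
	// A named block re-rendered through the new `include` helper and piped
	// into sprig's `indent`, mirroring the pattern used in the test below.
	tpl := `
{{- define "labels" -}}
app: {{ .Name }}
{{- end -}}
metadata:
{{ include "labels" . | indent 2 }}`

	out, err := luettemplate.String(tpl, map[string]interface{}{"Name": "luet"})
	if err != nil {
		panic(err)
	}
	fmt.Println(out)
	// Expected output (roughly):
	// metadata:
	//   app: luet
}
```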
@@ -190,5 +190,33 @@ faa: "baz"
Expect(res).To(Equal(""))

})

It("correctly parses `include`", func() {
testDir, err := ioutil.TempDir(os.TempDir(), "test")
Expect(err).ToNot(HaveOccurred())
defer os.RemoveAll(testDir)

toTemplate := filepath.Join(testDir, "totemplate.yaml")
values := filepath.Join(testDir, "values.yaml")
d := filepath.Join(testDir, "default.yaml")

writeFile(toTemplate, `
{{- define "app" -}}
app_name: {{if .Values.foo}}{{.Values.foo}}{{end}}
{{- end -}}
{{ include "app" . | indent 4 }}
`)
writeFile(values, `
foo: "bar"
`)
writeFile(d, ``)

Expect(err).ToNot(HaveOccurred())

res, err := RenderWithValues([]string{toTemplate}, values, d)
Expect(err).ToNot(HaveOccurred())
Expect(res).To(Equal(` app_name: bar
`))
})
})
})
@@ -481,11 +481,10 @@ func (cs *LuetCompiler) genArtifact(p *compilerspec.LuetCompilationSpec, builder
return a, errors.Wrap(err, "Failed while writing metadata file")
}
cs.Options.Context.Success(pkgTag, " :white_check_mark: done (empty virtual package)")
if cs.Options.PushFinalImages {
if err := cs.pushFinalArtifact(a, p, keepPermissions); err != nil {
return nil, err
}
if err := cs.finalizeImages(a, p, keepPermissions); err != nil {
return nil, err
}

return a, nil
}
@@ -519,20 +518,49 @@ func (cs *LuetCompiler) genArtifact(p *compilerspec.LuetCompilationSpec, builder
}
cs.Options.Context.Success(pkgTag, " :white_check_mark: Done building")

if cs.Options.PushFinalImages {
if err := cs.pushFinalArtifact(a, p, keepPermissions); err != nil {
return nil, err
}
if err := cs.finalizeImages(a, p, keepPermissions); err != nil {
return nil, err
}

return a, nil
}

// TODO: A small readaptation of repository_docker.go pushImageFromArtifact()
// Move this to a common place
func (cs *LuetCompiler) pushFinalArtifact(a *artifact.PackageArtifact, p *compilerspec.LuetCompilationSpec, keepPermissions bool) error {
cs.Options.Context.Info("Pushing final image for", a.CompileSpec.Package.HumanReadableString())
// finalizeImages finalizes images and generates final artifacts (push them as well if necessary).
func (cs *LuetCompiler) finalizeImages(a *artifact.PackageArtifact, p *compilerspec.LuetCompilationSpec, keepPermissions bool) error {

// TODO: This is a small readaptation of repository_docker.go pushImageFromArtifact().
// Maybe can be moved to a common place.

// We either check if finalization is needed
// and push or generate final images here, anything else we just return successfully
if !cs.Options.PushFinalImages && !cs.Options.GenerateFinalImages {
return nil
}

imageID := fmt.Sprintf("%s:%s", cs.Options.PushFinalImagesRepository, a.CompileSpec.Package.ImageID())
metadataImageID := fmt.Sprintf("%s:%s", cs.Options.PushFinalImagesRepository, helpers.SanitizeImageString(a.CompileSpec.GetPackage().GetMetadataFilePath()))

// Do generate image only, might be required for local iteration without pushing to remote repository
if cs.Options.GenerateFinalImages && !cs.Options.PushFinalImages {
cs.Options.Context.Info("Generating final image for", a.CompileSpec.Package.HumanReadableString())

if err := a.GenerateFinalImage(cs.Options.Context, imageID, cs.GetBackend(), true); err != nil {
return errors.Wrap(err, "while creating final image")
}

a := artifact.NewPackageArtifact(filepath.Join(p.GetOutputPath(), a.CompileSpec.GetPackage().GetMetadataFilePath()))
metadataArchive, err := artifact.CreateArtifactForFile(cs.Options.Context, a.Path)
if err != nil {
return errors.Wrap(err, "failed generating checksums for tree")
}
if err := metadataArchive.GenerateFinalImage(cs.Options.Context, metadataImageID, cs.Backend, keepPermissions); err != nil {
return errors.Wrap(err, "Failed generating metadata tree "+metadataImageID)
}

return nil
}

cs.Options.Context.Info("Pushing final image for", a.CompileSpec.Package.HumanReadableString())

// First push the package image
if !cs.Backend.ImageAvailable(imageID) || cs.Options.PushFinalImagesForce {
@@ -547,7 +575,6 @@ func (cs *LuetCompiler) pushFinalArtifact(a *artifact.PackageArtifact, p *compil
}

// Then the image ID
metadataImageID := fmt.Sprintf("%s:%s", cs.Options.PushFinalImagesRepository, helpers.SanitizeImageString(a.CompileSpec.GetPackage().GetMetadataFilePath()))
if !cs.Backend.ImageAvailable(metadataImageID) || cs.Options.PushFinalImagesForce {
cs.Options.Context.Info("Generating metadata image for", a.CompileSpec.Package.HumanReadableString(), metadataImageID)
@@ -604,6 +631,10 @@ func (cs *LuetCompiler) findImageHash(imageHash string, p *compilerspec.LuetComp
cs.Options.Context.Debug("Resolving image hash for", p.Package.HumanReadableString(), "hash", imageHash, "Pull repositories", p.BuildOptions.PullImageRepository)
toChecklist := append([]string{fmt.Sprintf("%s:%s", cs.Options.PushImageRepository, imageHash)},
genImageList(p.BuildOptions.PullImageRepository, imageHash)...)

if cs.Options.PushFinalImagesRepository != "" {
toChecklist = append(toChecklist, fmt.Sprintf("%s:%s", cs.Options.PushFinalImagesRepository, imageHash))
}
if exists, which := oneOfImagesExists(toChecklist, cs.Backend); exists {
resolvedImage = which
}
@@ -774,15 +805,15 @@ func (cs *LuetCompiler) FromDatabase(db types.PackageDatabase, minimum bool, dst
}
}

func (cs *LuetCompiler) ComputeDepTree(p *compilerspec.LuetCompilationSpec) (types.PackagesAssertions, error) {
s := solver.NewResolver(cs.Options.SolverOptions.SolverOptions, pkg.NewInMemoryDatabase(false), cs.Database, pkg.NewInMemoryDatabase(false), solver.NewSolverFromOptions(cs.Options.SolverOptions))
func (cs *LuetCompiler) ComputeDepTree(p *compilerspec.LuetCompilationSpec, db types.PackageDatabase) (types.PackagesAssertions, error) {
s := solver.NewResolver(cs.Options.SolverOptions.SolverOptions, pkg.NewInMemoryDatabase(false), db, pkg.NewInMemoryDatabase(false), solver.NewSolverFromOptions(cs.Options.SolverOptions))

solution, err := s.Install(types.Packages{p.GetPackage()})
if err != nil {
return nil, errors.Wrap(err, "While computing a solution for "+p.GetPackage().HumanReadableString())
}

dependencies, err := solution.Order(cs.Database, p.GetPackage().GetFingerPrint())
dependencies, err := solution.Order(db, p.GetPackage().GetFingerPrint())
if err != nil {
return nil, errors.Wrap(err, "While order a solution for "+p.GetPackage().HumanReadableString())
}
@@ -800,7 +831,7 @@ func (cs *LuetCompiler) BuildTree(compilerSpecs compilerspec.LuetCompilationspec
bt := &BuildTree{}

for _, sp := range compilerSpecs.All() {
ass, err := cs.ComputeDepTree(sp)
ass, err := cs.ComputeDepTree(sp, cs.Database)
if err != nil {
return nil, err
}
@@ -812,7 +843,7 @@ func (cs *LuetCompiler) BuildTree(compilerSpecs compilerspec.LuetCompilationspec
if err != nil {
return nil, err
}
ass, err := cs.ComputeDepTree(spec)
ass, err := cs.ComputeDepTree(spec, cs.Database)
if err != nil {
return nil, err
}
@@ -841,7 +872,7 @@ func (cs *LuetCompiler) ComputeMinimumCompilableSet(p ...*compilerspec.LuetCompi
allDependencies := types.PackagesAssertions{} // Get all packages that will be in deps
result := []*compilerspec.LuetCompilationSpec{}
for _, spec := range p {
sol, err := cs.ComputeDepTree(spec)
sol, err := cs.ComputeDepTree(spec, cs.Database)
if err != nil {
return nil, errors.Wrap(err, "failed querying hashtree")
}
@@ -910,16 +941,43 @@ func (cs *LuetCompiler) getSpecHash(pkgs types.Packages, salt string) (string, e
}

func (cs *LuetCompiler) resolveFinalImages(concurrency int, keepPermissions bool, p *compilerspec.LuetCompilationSpec) error {
if !p.RequiresFinalImages {
return nil
}

joinTag := ">:loop: final images<"

var fromPackages types.Packages

if p.RequiresFinalImages {
cs.Options.Context.Info(joinTag, "Generating a parent image from final packages")
fromPackages = p.Package.GetRequires()
cs.Options.Context.Info(joinTag, "Generating a parent image from final packages")

if cs.Options.RuntimeDatabase != nil {
// Create a fake db from runtime which has the target entry as the compiler view
db := pkg.NewInMemoryDatabase(false)
cs.Options.RuntimeDatabase.Clone(db)
defer db.Clean()

if err := db.UpdatePackage(p.Package); err != nil {
return err
}

// resolve deptree of runtime of p and use it in fromPackages
t, err := cs.ComputeDepTree(p, db)
if err != nil {
return errors.Wrap(err, "failed querying hashtree")
}

for _, a := range t {
if !a.Value || a.Package.Matches(p.Package) {
continue
}

fromPackages = append(fromPackages, a.Package)
cs.Options.Context.Infof("Adding dependency '%s'.", a.Package.HumanReadableString())
}
} else {
// No source image to resolve
return nil
cs.Options.Context.Info(joinTag, "No runtime db present, first level join only")
fromPackages = p.Package.GetRequires() // first level only
}

// First compute a hash and check if image is available. if it is, then directly consume that
@@ -930,10 +988,9 @@ func (cs *LuetCompiler) resolveFinalImages(concurrency int, keepPermissions bool

cs.Options.Context.Info(joinTag, "Searching existing image with hash", overallFp)

image := cs.findImageHash(overallFp, p)
if image != "" {
cs.Options.Context.Info("Image already found", image)
p.SetImage(image)
if img := cs.findImageHash(overallFp, p); img != "" {
cs.Options.Context.Info("Image already found", img)
p.SetImage(img)
return nil
}
cs.Options.Context.Info(joinTag, "Image not found. Generating image join with hash ", overallFp)
@@ -958,28 +1015,58 @@ func (cs *LuetCompiler) resolveFinalImages(concurrency int, keepPermissions bool
for _, c := range fromPackages {
current++
if c != nil && c.Name != "" && c.Version != "" {
joinTag2 := fmt.Sprintf("%s %d/%d ⤑ :hammer: build %s", joinTag, current, len(p.Package.GetRequires()), c.HumanReadableString())
joinTag2 := fmt.Sprintf("%s %d/%d ⤑ :hammer: build %s", joinTag, current, len(fromPackages), c.HumanReadableString())

cs.Options.Context.Info(joinTag2, "compilation starts")
spec, err := cs.FromPackage(c)
if err != nil {
return errors.Wrap(err, "while generating images to join from")
// Search if we have already a final-image that was already pushed
// for this to work on the same repo, it is required to push final images during build
if img := cs.findImageHash(c.ImageID(), p); cs.Options.PullFirst && img != "" {
cs.Options.Context.Info("Final image already found", img)
if !cs.Backend.ImageExists(img) {
if err := cs.Backend.DownloadImage(backend.Options{ImageName: img}); err != nil {
return errors.Wrap(err, "failed pulling image "+img+" during extraction")
}
}

imgRef, err := cs.Backend.ImageReference(img, true)
if err != nil {
return err
}

ctx := cs.Options.Context.WithLoggingContext(fmt.Sprintf("final image extract %s", img))
_, _, err = image.ExtractTo(
ctx,
imgRef,
joinDir,
nil,
)
if err != nil {
return err
}
} else {
cs.Options.Context.Info("Final image not found for", c.HumanReadableString())

// If no image was found, we have to build it from scratch
cs.Options.Context.Info(joinTag2, "compilation starts")
spec, err := cs.FromPackage(c)
if err != nil {
return errors.Wrap(err, "while generating images to join from")
}
wantsArtifact := true
genDepsArtifact := !cs.Options.PackageTargetOnly

spec.SetOutputPath(p.GetOutputPath())

artifact, err := cs.compile(concurrency, keepPermissions, &wantsArtifact, &genDepsArtifact, spec)
if err != nil {
return errors.Wrap(err, "failed building join image")
}

err = artifact.Unpack(cs.Options.Context, joinDir, keepPermissions)
if err != nil {
return errors.Wrap(err, "failed building join image")
}
cs.Options.Context.Info(joinTag2, ":white_check_mark: Done")
}
wantsArtifact := true
genDepsArtifact := !cs.Options.PackageTargetOnly

spec.SetOutputPath(p.GetOutputPath())

artifact, err := cs.compile(concurrency, keepPermissions, &wantsArtifact, &genDepsArtifact, spec)
if err != nil {
return errors.Wrap(err, "failed building join image")
}

err = artifact.Unpack(cs.Options.Context, joinDir, keepPermissions)
if err != nil {
return errors.Wrap(err, "failed building join image")
}
cs.Options.Context.Info(joinTag2, ":white_check_mark: Done")
}
}
@@ -22,6 +22,7 @@ import (
"path/filepath"
"strings"

"github.com/mudler/luet/pkg/api/core/logger"
"github.com/mudler/luet/pkg/api/core/types"

helpers "github.com/mudler/luet/tests/helpers"
@@ -1103,4 +1104,115 @@ var _ = Describe("Compiler", func() {
Expect(files).ToNot(ContainElement("bin/busybox"))
})
})

Context("final images", func() {
It("reuses final images", func() {
generalRecipe := tree.NewCompilerRecipe(pkg.NewInMemoryDatabase(false))
installerRecipe := tree.NewInstallerRecipe(pkg.NewInMemoryDatabase(false))

err := generalRecipe.Load("../../tests/fixtures/join_complex")
Expect(err).ToNot(HaveOccurred())

err = installerRecipe.Load("../../tests/fixtures/join_complex")
Expect(err).ToNot(HaveOccurred())

Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(6))
logdir, err := ioutil.TempDir("", "log")
Expect(err).ToNot(HaveOccurred())
defer os.RemoveAll(logdir) // clean up

logPath := filepath.Join(logdir, "logs")
var log string
readLogs := func() {
d, err := ioutil.ReadFile(logPath)
Expect(err).To(BeNil())
log = string(d)
}

l, err := logger.New(
logger.WithFileLogging(
logPath,
"",
),
)
Expect(err).ToNot(HaveOccurred())

c := context.NewContext(
context.WithLogger(l),
)

b := sd.NewSimpleDockerBackend(ctx)

joinImage := "luet/cache:08738767caa9a7397fad70ae53db85fa" //resulting join image
allImages := []string{
joinImage,
"test/test:c-test-1.2"}

cleanup := func(imgs ...string) {
// Remove the join hash so we force using final images
for _, toRemove := range imgs {
b.RemoveImage(sd.Options{ImageName: toRemove})
}
}
defer cleanup(allImages...)

compiler := NewLuetCompiler(b, generalRecipe.GetDatabase(),
options.WithFinalRepository("test/test"),
options.EnableGenerateFinalImages,
options.WithRuntimeDatabase(installerRecipe.GetDatabase()),
options.PullFirst(true),
options.WithContext(c))

spec, err := compiler.FromPackage(&types.Package{Name: "x", Category: "test", Version: "0.1"})
Expect(err).ToNot(HaveOccurred())
compiler.Options.CompressionType = compression.GZip
Expect(spec.GetPackage().GetPath()).ToNot(Equal(""))

tmpdir, err := ioutil.TempDir("", "tree")
Expect(err).ToNot(HaveOccurred())
defer os.RemoveAll(tmpdir) // clean up

spec.SetOutputPath(tmpdir)

artifacts, errs := compiler.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec))
Expect(errs).To(BeNil())
Expect(len(artifacts)).To(Equal(1))

readLogs()
Expect(log).To(And(
ContainSubstring("Generating final image for"),
ContainSubstring("Adding dependency"),
ContainSubstring("Final image not found for test/c-1.2"),
))

Expect(log).ToNot(And(
ContainSubstring("No runtime db present, first level join only"),
ContainSubstring("Final image already found test/test:c-test-1.2"),
))

os.WriteFile(logPath, []byte{}, os.ModePerm) // cleanup logs
// Remove the join hash so we force using final images
cleanup(joinImage)

//compile again
By("Recompiling")

artifacts, errs = compiler.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec))
Expect(errs).To(BeNil())
Expect(len(artifacts)).To(Equal(1))

// read logs again
readLogs()

Expect(log).To(And(
ContainSubstring("Final image already found test/test:f-test-1.2"),
))
Expect(log).ToNot(And(
ContainSubstring("No runtime db present, first level join only"),
ContainSubstring("build test/c-1.2 compilation starts"),
ContainSubstring("Final image not found for test/c-1.2"),
ContainSubstring("a-test-1.2"),
))
})
})
})
@@ -121,7 +121,7 @@ func (ht *ImageHashTree) genBuilderImageTag(p *compilerspec.LuetCompilationSpec,
// resolve computes the dependency tree of a compilation spec and returns solver assertions
// in order to be able to compile the spec.
func (ht *ImageHashTree) resolve(cs *LuetCompiler, p *compilerspec.LuetCompilationSpec) (types.PackagesAssertions, error) {
dependencies, err := cs.ComputeDepTree(p)
dependencies, err := cs.ComputeDepTree(p, cs.Database)
if err != nil {
return nil, errors.Wrap(err, "While computing a solution for "+p.GetPackage().HumanReadableString())
}
@@ -49,8 +49,12 @@ type Compiler struct {
// Tells wether to push final container images after building
PushFinalImages bool
PushFinalImagesForce bool

GenerateFinalImages bool

// Image repository to push to
PushFinalImagesRepository string
RuntimeDatabase types.PackageDatabase

Context types.Context
}
@@ -90,6 +94,13 @@ func WithOptions(opt *Compiler) func(cfg *Compiler) error {
}
}

func WithRuntimeDatabase(db types.PackageDatabase) func(cfg *Compiler) error {
return func(cfg *Compiler) error {
cfg.RuntimeDatabase = db
return nil
}
}

// WithFinalRepository Sets the final repository where to push
// images of built artifacts
func WithFinalRepository(r string) func(cfg *Compiler) error {
@@ -99,6 +110,11 @@ func WithFinalRepository(r string) func(cfg *Compiler) error {
}
}

func EnableGenerateFinalImages(cfg *Compiler) error {
cfg.GenerateFinalImages = true
return nil
}

func EnablePushFinalImages(cfg *Compiler) error {
cfg.PushFinalImages = true
return nil
@@ -49,7 +49,7 @@ func checkMigrationSchema(path string) {
defer b.Close()

for _, m := range migrations {
b.Bolt.Update(m)
m(b)
}
}
@@ -15,26 +15,55 @@

package database

import "go.etcd.io/bbolt"
import (
storm "github.com/asdine/storm"
"github.com/mudler/luet/pkg/api/core/types"
"github.com/pkg/errors"
"go.etcd.io/bbolt"
)

type schemaMigration func(tx *bbolt.Tx) error
type schemaMigration func(*storm.DB) error

var migrations = []schemaMigration{migrateDefaultPackage}

var migrateDefaultPackage schemaMigration = func(tx *bbolt.Tx) error {
// previously we had pkg.DefaultPackage
// IF it's there, rename it to the proper bucket
b := tx.Bucket([]byte("DefaultPackage"))
if b != nil {
newB, err := tx.CreateBucket([]byte("Package"))
if err != nil {
return nil
}
b.ForEach(func(k, v []byte) error {
return newB.Put(k, v)
})
var migrateDefaultPackage schemaMigration = func(bs *storm.DB) error {
packs := []types.Package{}

tx.DeleteBucket([]byte("DefaultPackage"))
bs.Bolt.View(
func(tx *bbolt.Tx) error {
// previously we had pkg.DefaultPackage
// IF it's there, collect packages to add to the new schema
b := tx.Bucket([]byte("DefaultPackage"))
if b != nil {
b.ForEach(func(k, v []byte) error {
p, err := types.PackageFromYaml(v)
if err == nil && p.ID != 0 {
packs = append(packs, p)
}
return nil
})
}
return nil
},
)

for k := range packs {
d := &packs[k]
d.ID = 0
err := bs.Save(d)
if err != nil {
return errors.Wrap(err, "Error saving package to "+d.Path)
}
}

// Be sure to delete old only if everything was migrated without any error
bs.Bolt.Update(func(tx *bbolt.Tx) error {
b := tx.Bucket([]byte("DefaultPackage"))
if b != nil {
return tx.DeleteBucket([]byte("DefaultPackage"))
}
return nil
})

return nil
}
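As a hedged illustration of the new migration shape (the name and bucket below are hypothetical, not part of this changeset), a follow-up migration would use the same `func(*storm.DB) error` signature, live in the same `database` package (so the imports above apply), and be appended to the `migrations` slice that `checkMigrationSchema` iterates:

```go
// Hypothetical sketch of an additional storm-based migration.
var migrateDropLegacyBucket schemaMigration = func(bs *storm.DB) error {
	// Raw bbolt access goes through the embedded handle, just like
	// migrateDefaultPackage does when it removes the old bucket.
	return bs.Bolt.Update(func(tx *bbolt.Tx) error {
		if b := tx.Bucket([]byte("LegacyBucket")); b != nil { // assumed bucket name
			return tx.DeleteBucket([]byte("LegacyBucket"))
		}
		return nil
	})
}

// Registration would then become:
// var migrations = []schemaMigration{migrateDefaultPackage, migrateDropLegacyBucket}
```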
@@ -1,3 +1,7 @@
category: "test"
name: "c"
version: "1.2"
requires:
- name: "b"
category: "test"
version: ">=0"
@@ -22,7 +22,7 @@ testBuild() {
buildst=$?
assertEquals 'builds successfully' "$buildst" "0"
assertTrue 'create package z' "[ -e '$tmpdir/testbuild/z-test-0.1.package.tar.gz' ]"
assertTrue 'create package z' "[ -e '$tmpdir/testbuild/x-test-0.1.package.tar.gz' ]"
assertTrue 'create package x' "[ -e '$tmpdir/testbuild/x-test-0.1.package.tar.gz' ]"

mkdir $tmpdir/extract
tar -xvf $tmpdir/testbuild/x-test-0.1.package.tar.gz -C $tmpdir/extract