Compare commits

...

21 Commits

Author SHA1 Message Date
Ettore Di Giacinto
19e6054574 Tag 0.9.2 2020-11-10 20:20:27 +01:00
Ettore Di Giacinto
a8624fe451 Move image removal in compileWithImage and further cleanup 2020-11-10 18:48:39 +01:00
Ettore Di Giacinto
14c1d6ef24 Refactor and optimize build process 2020-11-10 18:14:18 +01:00
Ettore Di Giacinto
36c58307e2 Don't export unless needed 2020-11-10 16:57:24 +01:00
Ettore Di Giacinto
665261e526 Tag 0.9.1 2020-11-09 19:42:34 +01:00
Ettore Di Giacinto
794c5984a2 Add pack command 2020-11-09 18:16:22 +01:00
Ettore Di Giacinto
a765147c1d Add templated finalizers 2020-11-08 21:14:19 +01:00
Ettore Di Giacinto
088adf6f3a Tag 0.9 2020-11-08 18:25:59 +01:00
Ettore Di Giacinto
cead09fb9f Merge pull request #148 from mudler/respect_rootfs4conf
Respect rootfs path for configs and url
2020-11-08 18:25:29 +01:00
Daniele Rondina
9a1787ddaf client/local: Handle config_from_host on DownloadFile 2020-11-08 17:06:05 +01:00
Ettore Di Giacinto
b1316b50b4 Add excludes tests 2020-11-08 16:02:11 +01:00
Ettore Di Giacinto
d92ee9e1d9 Add preliminar support for excludes 2020-11-08 15:35:24 +01:00
Ettore Di Giacinto
e7b58eec41 Use sane default for installer script 2020-11-08 14:33:34 +01:00
Ettore Di Giacinto
6a1b64acea Order files before uninstall
Fixes #149
2020-11-08 12:36:41 +01:00
Ettore Di Giacinto
df14fe60fc Tag 0.8.15 2020-11-08 11:07:33 +01:00
Ettore Di Giacinto
459eb01a59 Don't write err to stdout if not present 2020-11-08 10:02:00 +01:00
Daniele Rondina
e6c597c7d3 test-integration/12_config_protect.sh: Use repo url related with rootfs path 2020-11-08 00:05:06 +01:00
Daniele Rondina
e70cdbaaf7 Respect rootfs on repositories urls 2020-11-08 00:00:15 +01:00
Daniele Rondina
eea9dad2c6 tests/integration: Add option config_from_host 2020-11-07 19:14:44 +01:00
Daniele Rondina
513f441bb3 Add option config_from_host 2020-11-07 18:56:25 +01:00
Daniele Rondina
ebe7466fdc Respect rootfs path for load config 2020-11-07 18:28:23 +01:00
57 changed files with 886 additions and 213 deletions

89
cmd/pack.go Normal file
View File

@@ -0,0 +1,89 @@
// Copyright © 2019 Ettore Di Giacinto <mudler@gentoo.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"os"
"path/filepath"
"time"
helpers "github.com/mudler/luet/cmd/helpers"
"github.com/mudler/luet/pkg/compiler"
. "github.com/mudler/luet/pkg/config"
. "github.com/mudler/luet/pkg/logger"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
// packCmd implements "luet pack <package name>": it archives the content
// of a source directory into a package tarball and writes the matching
// metadata yaml next to it, without going through a full build.
var packCmd = &cobra.Command{
	Use:   "pack <package name>",
	Short: "pack a custom package",
	Long:  `pack and creates metadata directly from a source path`,
	PreRun: func(cmd *cobra.Command, args []string) {
		// Bind each CLI flag to its viper key so Run can read them uniformly.
		for _, name := range []string{"destination", "compression", "source"} {
			viper.BindPFlag(name, cmd.Flags().Lookup(name))
		}
	},
	Run: func(cmd *cobra.Command, args []string) {
		if len(args) != 1 {
			Fatal("You must specify a package name")
		}
		packageName := args[0]

		var (
			srcDir      = viper.GetString("source")
			dstDir      = viper.GetString("destination")
			compression = viper.GetString("compression")
			workers     = LuetCfg.GetGeneral().Concurrency
		)

		pack, err := helpers.ParsePackageStr(packageName)
		if err != nil {
			Fatal("Invalid package string ", packageName, ": ", err.Error())
		}

		// Build a minimal compilation spec: only the package identity is
		// needed, the payload comes straight from the source directory.
		spec := &compiler.LuetCompilationSpec{Package: pack}
		artifact := compiler.NewPackageArtifact(filepath.Join(dstDir, pack.GetFingerPrint()+".package.tar"))
		artifact.SetCompressionType(compiler.CompressionImplementation(compression))

		if err := artifact.Compress(srcDir, workers); err != nil {
			Fatal("failed compressing ", packageName, ": ", err.Error())
		}

		artifact.SetCompileSpec(spec)
		filelist, err := artifact.FileList()
		if err != nil {
			Fatal("failed generating file list for ", packageName, ": ", err.Error())
		}
		artifact.SetFiles(filelist)
		artifact.GetCompileSpec().GetPackage().SetBuildTimestamp(time.Now().String())

		if err := artifact.WriteYaml(dstDir); err != nil {
			Fatal("failed writing metadata yaml file for ", packageName, ": ", err.Error())
		}
	},
}
// init registers the pack command on the root command and declares its
// flags; source and destination default to the current working directory.
func init() {
	cwd, err := os.Getwd()
	if err != nil {
		Fatal(err)
	}

	flags := packCmd.Flags()
	flags.String("source", cwd, "Source folder")
	flags.String("destination", cwd, "Destination folder")
	flags.String("compression", "gzip", "Compression alg: none, gzip")

	RootCmd.AddCommand(packCmd)
}

View File

@@ -38,7 +38,7 @@ var Verbose bool
var LockedCommands = []string{"install", "uninstall", "upgrade"} var LockedCommands = []string{"install", "uninstall", "upgrade"}
const ( const (
LuetCLIVersion = "0.8.14" LuetCLIVersion = "0.9.2"
LuetEnvPrefix = "LUET" LuetEnvPrefix = "LUET"
) )

View File

@@ -4,14 +4,17 @@ export LUET_NOLOCK=true
LUET_VERSION=0.8.6 LUET_VERSION=0.8.6
LUET_ROOTFS=${LUET_ROOTFS:-/} LUET_ROOTFS=${LUET_ROOTFS:-/}
LUET_DATABASE_PATH=${LUET_DATABASE_PATH:-/} LUET_DATABASE_PATH=${LUET_DATABASE_PATH:-/var/luet/db}
LUET_DATABASE_ENGINE=${LUET_DATABASE_ENGINE:-boltdb} LUET_DATABASE_ENGINE=${LUET_DATABASE_ENGINE:-boltdb}
LUET_CONFIG_PROTECT=${LUET_CONFIG_PROTECT:-0} LUET_CONFIG_PROTECT=${LUET_CONFIG_PROTECT:-1}
wget -q https://github.com/mudler/luet/releases/download/0.8.6/luet-0.8.6-linux-amd64 -O luet wget -q https://github.com/mudler/luet/releases/download/0.8.6/luet-0.8.6-linux-amd64 -O luet
chmod +x luet chmod +x luet
mkdir -p /etc/luet/repos.conf.d || true mkdir -p /etc/luet/repos.conf.d || true
mkdir -p $LUET_DATABASE_PATH || true
mkdir -p /var/tmp/luet || true
if [ "${LUET_CONFIG_PROTECT}" = "1" ] ; then if [ "${LUET_CONFIG_PROTECT}" = "1" ] ; then
mkdir -p /etc/luet/config.protect.d || true mkdir -p /etc/luet/config.protect.d || true
wget -q https://raw.githubusercontent.com/mudler/luet/master/contrib/config/config.protect.d/01_etc.yml.example -O /etc/luet/config.protect.d/01_etc.yml wget -q https://raw.githubusercontent.com/mudler/luet/master/contrib/config/config.protect.d/01_etc.yml.example -O /etc/luet/config.protect.d/01_etc.yml
@@ -25,6 +28,7 @@ system:
rootfs: ${LUET_ROOTFS} rootfs: ${LUET_ROOTFS}
database_path: "${LUET_DATABASE_PATH}" database_path: "${LUET_DATABASE_PATH}"
database_engine: "${LUET_DATABASE_ENGINE}" database_engine: "${LUET_DATABASE_ENGINE}"
tmpdir_base: "/var/tmp/luet"
EOF EOF
./luet install repository/luet repository/mocaccino-repository-index ./luet install repository/luet repository/mocaccino-repository-index

View File

@@ -69,6 +69,7 @@
# Default $TMPDIR/tmpluet # Default $TMPDIR/tmpluet
# tmpdir_base: "/tmp/tmpluet" # tmpdir_base: "/tmp/tmpluet"
# #
#
# --------------------------------------------- # ---------------------------------------------
# Repositories configurations directories. # Repositories configurations directories.
# --------------------------------------------- # ---------------------------------------------
@@ -93,6 +94,11 @@
# annotation. # annotation.
# config_protect_skip: false # config_protect_skip: false
# #
# The paths used for load repositories and config
# protects are based on host rootfs.
# If set to false rootfs path is used as prefix.
# config_from_host: true
#
# System repositories # System repositories
# --------------------------------------------- # ---------------------------------------------
# In alternative to define repositories files # In alternative to define repositories files

View File

@@ -516,7 +516,7 @@ func worker(i int, wg *sync.WaitGroup, s <-chan CopyJob) {
} }
// ExtractArtifactFromDelta extracts deltas from ArtifactLayer from an image in tar format // ExtractArtifactFromDelta extracts deltas from ArtifactLayer from an image in tar format
func ExtractArtifactFromDelta(src, dst string, layers []ArtifactLayer, concurrency int, keepPerms bool, includes []string, t CompressionImplementation) (Artifact, error) { func ExtractArtifactFromDelta(src, dst string, layers []ArtifactLayer, concurrency int, keepPerms bool, includes []string, excludes []string, t CompressionImplementation) (Artifact, error) {
archive, err := LuetCfg.GetSystem().TempDir("archive") archive, err := LuetCfg.GetSystem().TempDir("archive")
if err != nil { if err != nil {
@@ -546,7 +546,8 @@ func ExtractArtifactFromDelta(src, dst string, layers []ArtifactLayer, concurren
} }
// Handle includes in spec. If specified they filter what gets in the package // Handle includes in spec. If specified they filter what gets in the package
if len(includes) > 0 {
if len(includes) > 0 && len(excludes) == 0 {
var includeRegexp []*regexp.Regexp var includeRegexp []*regexp.Regexp
for _, i := range includes { for _, i := range includes {
r, e := regexp.Compile(i) r, e := regexp.Compile(i)
@@ -574,6 +575,81 @@ func ExtractArtifactFromDelta(src, dst string, layers []ArtifactLayer, concurren
Debug("File ", a.Name, " deleted") Debug("File ", a.Name, " deleted")
} }
} }
} else if len(includes) == 0 && len(excludes) != 0 {
var excludeRegexp []*regexp.Regexp
for _, i := range excludes {
r, e := regexp.Compile(i)
if e != nil {
Warning("Failed compiling regex:", e)
continue
}
excludeRegexp = append(excludeRegexp, r)
}
for _, l := range layers {
// Consider d.Additions (and d.Changes? - warn at least) only
ADD:
for _, a := range l.Diffs.Additions {
for _, i := range excludeRegexp {
if i.MatchString(a.Name) {
continue ADD
}
}
toCopy <- CopyJob{Src: filepath.Join(src, a.Name), Dst: filepath.Join(archive, a.Name), Artifact: a.Name}
}
for _, a := range l.Diffs.Changes {
Debug("File ", a.Name, " changed")
}
for _, a := range l.Diffs.Deletions {
Debug("File ", a.Name, " deleted")
}
}
} else if len(includes) != 0 && len(excludes) != 0 {
var includeRegexp []*regexp.Regexp
for _, i := range includes {
r, e := regexp.Compile(i)
if e != nil {
Warning("Failed compiling regex:", e)
continue
}
includeRegexp = append(includeRegexp, r)
}
var excludeRegexp []*regexp.Regexp
for _, i := range excludes {
r, e := regexp.Compile(i)
if e != nil {
Warning("Failed compiling regex:", e)
continue
}
excludeRegexp = append(excludeRegexp, r)
}
for _, l := range layers {
// Consider d.Additions (and d.Changes? - warn at least) only
EXCLUDES:
for _, a := range l.Diffs.Additions {
for _, i := range includeRegexp {
if i.MatchString(a.Name) {
for _, e := range excludeRegexp {
if e.MatchString(a.Name) {
continue EXCLUDES
}
}
toCopy <- CopyJob{Src: filepath.Join(src, a.Name), Dst: filepath.Join(archive, a.Name), Artifact: a.Name}
continue EXCLUDES
}
}
}
for _, a := range l.Diffs.Changes {
Debug("File ", a.Name, " changed")
}
for _, a := range l.Diffs.Deletions {
Debug("File ", a.Name, " deleted")
}
}
} else { } else {
// Otherwise just grab all // Otherwise just grab all
for _, l := range layers { for _, l := range layers {

View File

@@ -130,7 +130,7 @@ RUN echo bar > /test2`))
err = b.ExtractRootfs(CompilerBackendOptions{SourcePath: filepath.Join(tmpdir, "output2.tar"), Destination: rootfs}, false) err = b.ExtractRootfs(CompilerBackendOptions{SourcePath: filepath.Join(tmpdir, "output2.tar"), Destination: rootfs}, false)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
artifact, err := ExtractArtifactFromDelta(rootfs, filepath.Join(tmpdir, "package.tar"), diffs, 2, false, []string{}, None) artifact, err := ExtractArtifactFromDelta(rootfs, filepath.Join(tmpdir, "package.tar"), diffs, 2, false, []string{}, []string{}, None)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(helpers.Exists(filepath.Join(tmpdir, "package.tar"))).To(BeTrue()) Expect(helpers.Exists(filepath.Join(tmpdir, "package.tar"))).To(BeTrue())
err = helpers.Untar(artifact.GetPath(), unpacked, false) err = helpers.Untar(artifact.GetPath(), unpacked, false)

View File

@@ -95,7 +95,7 @@ func (*SimpleDocker) ImageExists(imagename string) bool {
cmd := exec.Command("docker", buildarg...) cmd := exec.Command("docker", buildarg...)
out, err := cmd.CombinedOutput() out, err := cmd.CombinedOutput()
if err != nil { if err != nil {
Warning("Image not present") Debug("Image not present")
Debug(string(out)) Debug(string(out))
return false return false
} }

View File

@@ -21,8 +21,6 @@ import (
"os" "os"
"path/filepath" "path/filepath"
"github.com/ghodss/yaml"
"regexp" "regexp"
"strings" "strings"
"sync" "sync"
@@ -181,7 +179,7 @@ func (cs *LuetCompiler) CompileParallel(keepPermissions bool, ps CompilationSpec
return artifacts, allErrors return artifacts, allErrors
} }
func (cs *LuetCompiler) stripIncludesFromRootfs(includes []string, rootfs string) error { func (cs *LuetCompiler) stripFromRootfs(includes []string, rootfs string, include bool) error {
var includeRegexp []*regexp.Regexp var includeRegexp []*regexp.Regexp
for _, i := range includes { for _, i := range includes {
r, e := regexp.Compile(i) r, e := regexp.Compile(i)
@@ -213,7 +211,7 @@ func (cs *LuetCompiler) stripIncludesFromRootfs(includes []string, rootfs string
} }
} }
if !match { if include && !match || !include && match {
toRemove = append(toRemove, currentpath) toRemove = append(toRemove, currentpath)
} }
@@ -235,7 +233,58 @@ func (cs *LuetCompiler) stripIncludesFromRootfs(includes []string, rootfs string
return nil return nil
} }
func (cs *LuetCompiler) compileWithImage(image, buildertaggedImage, packageImage string, concurrency int, keepPermissions, keepImg bool, p CompilationSpec, generateArtifact bool) (Artifact, error) { func (cs *LuetCompiler) unpackFs(rootfs string, concurrency int, p CompilationSpec) (Artifact, error) {
if p.GetPackageDir() != "" {
Info(":tophat: Packing from output dir", p.GetPackageDir())
rootfs = filepath.Join(rootfs, p.GetPackageDir())
}
if len(p.GetIncludes()) > 0 {
// strip from includes
cs.stripFromRootfs(p.GetIncludes(), rootfs, true)
}
if len(p.GetExcludes()) > 0 {
// strip from excludes
cs.stripFromRootfs(p.GetExcludes(), rootfs, false)
}
artifact := NewPackageArtifact(p.Rel(p.GetPackage().GetFingerPrint() + ".package.tar"))
artifact.SetCompressionType(cs.CompressionType)
if err := artifact.Compress(rootfs, concurrency); err != nil {
return nil, errors.Wrap(err, "Error met while creating package archive")
}
artifact.SetCompileSpec(p)
return artifact, nil
}
// unpackDelta builds the package artifact as the delta between the builder
// image and the runner image: only files layered on top of the builder
// export end up in the package tarball. Include/exclude patterns from the
// spec filter what gets copied into the final archive.
func (cs *LuetCompiler) unpackDelta(rootfs string, concurrency int, keepPermissions bool, p CompilationSpec, builderOpts, runnerOpts CompilerBackendOptions) (Artifact, error) {
	pkgTag := ":package: " + p.GetPackage().HumanReadableString()

	// The builder image must be exported first so its layers can be
	// compared against the runner image export below.
	if err := cs.Backend.ExportImage(builderOpts); err != nil {
		return nil, errors.Wrap(err, "Could not export image")
	}
	// Remove the export tarball afterwards unless the user asked to keep it.
	if !cs.Options.KeepImageExport {
		defer os.Remove(builderOpts.Destination)
	}

	Info(pkgTag, ":hammer: Generating delta")
	diffs, err := cs.Backend.Changes(builderOpts.Destination, runnerOpts.Destination)
	if err != nil {
		return nil, errors.Wrap(err, "Could not generate changes from layers")
	}

	artifact, err := ExtractArtifactFromDelta(rootfs, p.Rel(p.GetPackage().GetFingerPrint()+".package.tar"), diffs, concurrency, keepPermissions, p.GetIncludes(), p.GetExcludes(), cs.CompressionType)
	if err != nil {
		return nil, errors.Wrap(err, "Could not generate deltas")
	}

	artifact.SetCompileSpec(p)
	return artifact, nil
}
func (cs *LuetCompiler) buildPackageImage(image, buildertaggedImage, packageImage string,
concurrency int, keepPermissions bool,
p CompilationSpec) (CompilerBackendOptions, CompilerBackendOptions, error) {
var runnerOpts, builderOpts CompilerBackendOptions
pkgTag := ":package: " + p.GetPackage().HumanReadableString() pkgTag := ":package: " + p.GetPackage().HumanReadableString()
@@ -260,32 +309,23 @@ func (cs *LuetCompiler) compileWithImage(image, buildertaggedImage, packageImage
packageImage = cs.ImageRepository + "-" + fp packageImage = cs.ImageRepository + "-" + fp
} }
if !cs.Clean {
exists := cs.Backend.ImageExists(buildertaggedImage) && cs.Backend.ImageExists(packageImage)
if art, err := LoadArtifactFromYaml(p); err == nil && (cs.Options.SkipIfMetadataExists || exists) {
Debug("Artifact reloaded. Skipping build")
return art, err
}
}
p.SetSeedImage(image) // In this case, we ignore the build deps as we suppose that the image has them - otherwise we recompose the tree with a solver, p.SetSeedImage(image) // In this case, we ignore the build deps as we suppose that the image has them - otherwise we recompose the tree with a solver,
// and we build all the images first. // and we build all the images first.
err := os.MkdirAll(p.Rel("build"), os.ModePerm) err := os.MkdirAll(p.Rel("build"), os.ModePerm)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "Error met while creating tempdir for building") return builderOpts, runnerOpts, errors.Wrap(err, "Error met while creating tempdir for building")
} }
buildDir, err := ioutil.TempDir(p.Rel("build"), "pack") buildDir, err := ioutil.TempDir(p.Rel("build"), "pack")
if err != nil { if err != nil {
return nil, errors.Wrap(err, "Error met while creating tempdir for building") return builderOpts, runnerOpts, errors.Wrap(err, "Error met while creating tempdir for building")
} }
defer os.RemoveAll(buildDir) // clean up defer os.RemoveAll(buildDir) // clean up
// First we copy the source definitions into the output - we create a copy which the builds will need (we need to cache this phase somehow) // First we copy the source definitions into the output - we create a copy which the builds will need (we need to cache this phase somehow)
err = helpers.CopyDir(p.GetPackage().GetPath(), buildDir) err = helpers.CopyDir(p.GetPackage().GetPath(), buildDir)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "Could not copy package sources") return builderOpts, runnerOpts, errors.Wrap(err, "Could not copy package sources")
} }
// Copy file into the build context, the compilespec might have requested to do so. // Copy file into the build context, the compilespec might have requested to do so.
@@ -299,63 +339,75 @@ func (cs *LuetCompiler) compileWithImage(image, buildertaggedImage, packageImage
Info(pkgTag, ":whale: Generating 'builder' image definition from", image) Info(pkgTag, ":whale: Generating 'builder' image definition from", image)
// First we create the builder image // First we create the builder image
p.WriteBuildImageDefinition(filepath.Join(buildDir, p.GetPackage().GetFingerPrint()+"-builder.dockerfile")) if err := p.WriteBuildImageDefinition(filepath.Join(buildDir, p.GetPackage().GetFingerPrint()+"-builder.dockerfile")); err != nil {
builderOpts := CompilerBackendOptions{ return builderOpts, runnerOpts, errors.Wrap(err, "Could not generate image definition")
}
// Then we write the step image, which uses the builder one
if err := p.WriteStepImageDefinition(buildertaggedImage, filepath.Join(buildDir, p.GetPackage().GetFingerPrint()+".dockerfile")); err != nil {
return builderOpts, runnerOpts, errors.Wrap(err, "Could not generate image definition")
}
builderOpts = CompilerBackendOptions{
ImageName: buildertaggedImage, ImageName: buildertaggedImage,
SourcePath: buildDir, SourcePath: buildDir,
DockerFileName: p.GetPackage().GetFingerPrint() + "-builder.dockerfile", DockerFileName: p.GetPackage().GetFingerPrint() + "-builder.dockerfile",
Destination: p.Rel(p.GetPackage().GetFingerPrint() + "-builder.image.tar"), Destination: p.Rel(p.GetPackage().GetFingerPrint() + "-builder.image.tar"),
} }
runnerOpts = CompilerBackendOptions{
buildBuilderImage := true
if cs.Options.PullFirst {
if err := cs.Backend.DownloadImage(builderOpts); err == nil {
buildBuilderImage = false
}
}
if buildBuilderImage {
if err = cs.Backend.BuildImage(builderOpts); err != nil {
return nil, errors.Wrap(err, "Could not build image: "+image+" "+builderOpts.DockerFileName)
}
}
if err = cs.Backend.ExportImage(builderOpts); err != nil {
return nil, errors.Wrap(err, "Could not export image")
}
if !cs.Options.KeepImageExport {
defer os.Remove(builderOpts.Destination)
}
if cs.Options.Push && buildBuilderImage {
if err = cs.Backend.Push(builderOpts); err != nil {
return nil, errors.Wrap(err, "Could not push image: "+image+" "+builderOpts.DockerFileName)
}
}
// Then we write the step image, which uses the builder one
p.WriteStepImageDefinition(buildertaggedImage, filepath.Join(buildDir, p.GetPackage().GetFingerPrint()+".dockerfile"))
runnerOpts := CompilerBackendOptions{
ImageName: packageImage, ImageName: packageImage,
SourcePath: buildDir, SourcePath: buildDir,
DockerFileName: p.GetPackage().GetFingerPrint() + ".dockerfile", DockerFileName: p.GetPackage().GetFingerPrint() + ".dockerfile",
Destination: p.Rel(p.GetPackage().GetFingerPrint() + ".image.tar"), Destination: p.Rel(p.GetPackage().GetFingerPrint() + ".image.tar"),
} }
buildPackageImage := true buildAndPush := func(opts CompilerBackendOptions) error {
if cs.Options.PullFirst { buildImage := true
//Best effort pull if cs.Options.PullFirst {
if err := cs.Backend.DownloadImage(runnerOpts); err == nil { if err := cs.Backend.DownloadImage(opts); err == nil {
buildPackageImage = false buildImage = false
}
} }
if buildImage {
if err := cs.Backend.BuildImage(opts); err != nil {
return errors.Wrap(err, "Could not build image: "+image+" "+opts.DockerFileName)
}
if cs.Options.Push {
if err = cs.Backend.Push(opts); err != nil {
return errors.Wrap(err, "Could not push image: "+image+" "+opts.DockerFileName)
}
}
}
return nil
} }
if buildPackageImage { if err := buildAndPush(builderOpts); err != nil {
if err := cs.Backend.BuildImage(runnerOpts); err != nil { return builderOpts, runnerOpts, errors.Wrap(err, "Could not push image: "+image+" "+builderOpts.DockerFileName)
return nil, errors.Wrap(err, "Failed building image for "+runnerOpts.ImageName+" "+runnerOpts.DockerFileName)
}
} }
if err := buildAndPush(runnerOpts); err != nil {
return builderOpts, runnerOpts, errors.Wrap(err, "Could not push image: "+image+" "+builderOpts.DockerFileName)
}
return builderOpts, runnerOpts, nil
}
func (cs *LuetCompiler) genArtifact(p CompilationSpec, builderOpts, runnerOpts CompilerBackendOptions, concurrency int, keepPermissions bool) (Artifact, error) {
// generate Artifact
var artifact Artifact
var rootfs string
var err error
unpack := p.ImageUnpack()
pkgTag := ":package: " + p.GetPackage().HumanReadableString()
// If package_dir was specified in the spec, we want to treat the content of the directory
// as the root of our archive. ImageUnpack is implied to be true. override it
if p.GetPackageDir() != "" {
unpack = true
}
// prepare folder content of the image with the package compiled inside
if err := cs.Backend.ExportImage(runnerOpts); err != nil { if err := cs.Backend.ExportImage(runnerOpts); err != nil {
return nil, errors.Wrap(err, "Failed exporting image") return nil, errors.Wrap(err, "Failed exporting image")
} }
@@ -364,23 +416,7 @@ func (cs *LuetCompiler) compileWithImage(image, buildertaggedImage, packageImage
defer os.Remove(runnerOpts.Destination) defer os.Remove(runnerOpts.Destination)
} }
if cs.Options.Push && buildPackageImage { rootfs, err = ioutil.TempDir(p.GetOutputPath(), "rootfs")
err = cs.Backend.Push(runnerOpts)
if err != nil {
return nil, errors.Wrap(err, "Could not push image: "+image+" "+builderOpts.DockerFileName)
}
}
var artifact Artifact
unpack := p.ImageUnpack()
// If package_dir was specified in the spec, we want to treat the content of the directory
// as the root of our archive. ImageUnpack is implied to be true. override it
if p.GetPackageDir() != "" {
unpack = true
}
rootfs, err := ioutil.TempDir(p.GetOutputPath(), "rootfs")
if err != nil { if err != nil {
return nil, errors.Wrap(err, "Could not create tempdir") return nil, errors.Wrap(err, "Could not create tempdir")
} }
@@ -388,62 +424,24 @@ func (cs *LuetCompiler) compileWithImage(image, buildertaggedImage, packageImage
// TODO: Compression and such // TODO: Compression and such
err = cs.Backend.ExtractRootfs(CompilerBackendOptions{ err = cs.Backend.ExtractRootfs(CompilerBackendOptions{
ImageName: packageImage, ImageName: runnerOpts.ImageName,
SourcePath: runnerOpts.Destination, Destination: rootfs}, keepPermissions) SourcePath: runnerOpts.Destination, Destination: rootfs}, keepPermissions)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "Could not extract rootfs") return nil, errors.Wrap(err, "Could not extract rootfs")
} }
if !keepImg {
// We keep them around, so to not reload them from the tar (which should be the "correct way") and we automatically share the same layers
// TODO: Handle caching and optionally do not remove things
err = cs.Backend.RemoveImage(builderOpts)
if err != nil {
Warning("Could not remove image ", builderOpts.ImageName)
// return nil, errors.Wrap(err, "Could not remove image")
}
err = cs.Backend.RemoveImage(runnerOpts)
if err != nil {
Warning("Could not remove image ", builderOpts.ImageName)
// return nil, errors.Wrap(err, "Could not remove image")
}
}
if !generateArtifact {
return &PackageArtifact{}, nil
}
if unpack { if unpack {
if p.GetPackageDir() != "" { // Take content of container as a base for our package files
Info(":tophat: Packing from output dir", p.GetPackageDir()) artifact, err = cs.unpackFs(rootfs, concurrency, p)
rootfs = filepath.Join(rootfs, p.GetPackageDir())
}
if len(p.GetIncludes()) > 0 {
// strip from includes
cs.stripIncludesFromRootfs(p.GetIncludes(), rootfs)
}
artifact = NewPackageArtifact(p.Rel(p.GetPackage().GetFingerPrint() + ".package.tar"))
artifact.SetCompressionType(cs.CompressionType)
err = artifact.Compress(rootfs, concurrency)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "Error met while creating package archive") return nil, errors.Wrap(err, "Error met while creating package archive")
} }
artifact.SetCompileSpec(p)
} else { } else {
Info(pkgTag, ":hammer: Generating delta") // Generate delta between the two images
diffs, err := cs.Backend.Changes(p.Rel(p.GetPackage().GetFingerPrint()+"-builder.image.tar"), p.Rel(p.GetPackage().GetFingerPrint()+".image.tar")) artifact, err = cs.unpackDelta(rootfs, concurrency, keepPermissions, p, builderOpts, runnerOpts)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "Could not generate changes from layers") return nil, errors.Wrap(err, "Error met while creating package archive")
} }
artifact, err = ExtractArtifactFromDelta(rootfs, p.Rel(p.GetPackage().GetFingerPrint()+".package.tar"), diffs, concurrency, keepPermissions, p.GetIncludes(), cs.CompressionType)
if err != nil {
return nil, errors.Wrap(err, "Could not generate deltas")
}
artifact.SetCompileSpec(p)
} }
filelist, err := artifact.FileList() filelist, err := artifact.FileList()
@@ -452,7 +450,6 @@ func (cs *LuetCompiler) compileWithImage(image, buildertaggedImage, packageImage
} }
artifact.SetFiles(filelist) artifact.SetFiles(filelist)
artifact.GetCompileSpec().GetPackage().SetBuildTimestamp(time.Now().String()) artifact.GetCompileSpec().GetPackage().SetBuildTimestamp(time.Now().String())
err = artifact.WriteYaml(p.GetOutputPath()) err = artifact.WriteYaml(p.GetOutputPath())
@@ -464,6 +461,43 @@ func (cs *LuetCompiler) compileWithImage(image, buildertaggedImage, packageImage
return artifact, nil return artifact, nil
} }
// compileWithImage drives a single package build on top of the given seed
// image: it may reuse a previously generated artifact, otherwise it builds
// the builder/runner images and derives the artifact from them.
func (cs *LuetCompiler) compileWithImage(image, buildertaggedImage, packageImage string,
	concurrency int,
	keepPermissions, keepImg bool,
	p CompilationSpec, generateArtifact bool) (Artifact, error) {

	// Unless a clean build was requested, skip the build entirely when the
	// artifact metadata can be reloaded and either SkipIfMetadataExists is
	// set or both images are already present locally.
	if !cs.Clean {
		exists := cs.Backend.ImageExists(buildertaggedImage) && cs.Backend.ImageExists(packageImage)
		if art, err := LoadArtifactFromYaml(p); err == nil && (cs.Options.SkipIfMetadataExists || exists) {
			Debug("Artifact reloaded. Skipping build")
			return art, err
		}
	}

	builderOpts, runnerOpts, err := cs.buildPackageImage(image, buildertaggedImage, packageImage, concurrency, keepPermissions, p)
	if err != nil {
		return nil, errors.Wrap(err, "failed building package image")
	}

	// Image removal is deferred so the images are still available while the
	// artifact is generated below; failures are only warned about.
	if !keepImg {
		defer func() {
			// We keep them around, so to not reload them from the tar (which should be the "correct way") and we automatically share the same layers
			if err := cs.Backend.RemoveImage(builderOpts); err != nil {
				Warning("Could not remove image ", builderOpts.ImageName)
			}
			if err := cs.Backend.RemoveImage(runnerOpts); err != nil {
				Warning("Could not remove image ", runnerOpts.ImageName)
			}
		}()
	}

	// Callers that only need the images (e.g. intermediate dependencies)
	// get an empty artifact back.
	if !generateArtifact {
		return &PackageArtifact{}, nil
	}

	return cs.genArtifact(p, builderOpts, runnerOpts, concurrency, keepPermissions)
}
func (cs *LuetCompiler) FromDatabase(db pkg.PackageDatabase, minimum bool, dst string) ([]CompilationSpec, error) { func (cs *LuetCompiler) FromDatabase(db pkg.PackageDatabase, minimum bool, dst string) ([]CompilationSpec, error) {
compilerSpecs := NewLuetCompilationspecs() compilerSpecs := NewLuetCompilationspecs()
@@ -654,31 +688,11 @@ func (cs *LuetCompiler) FromPackage(p pkg.Package) (CompilationSpec, error) {
return nil, err return nil, err
} }
buildFile := pack.Rel(BuildFile) out, err := helpers.RenderFiles(pack.Rel(BuildFile), pack.Rel(DefinitionFile))
if !helpers.Exists(buildFile) {
return nil, errors.New("No build file present for " + p.GetFingerPrint())
}
defFile := pack.Rel(DefinitionFile)
if !helpers.Exists(defFile) {
return nil, errors.New("No build file present for " + p.GetFingerPrint())
}
def, err := ioutil.ReadFile(defFile)
if err != nil { if err != nil {
return nil, err return nil, errors.Wrap(err, "rendering file "+pack.Rel(BuildFile))
} }
build, err := ioutil.ReadFile(buildFile)
if err != nil {
return nil, err
}
var values templatedata
if err = yaml.Unmarshal(def, &values); err != nil {
return nil, err
}
out, err := helpers.RenderHelm(string(build), values)
if err != nil {
return nil, err
}
return NewLuetCompilationSpec([]byte(out), pack) return NewLuetCompilationSpec([]byte(out), pack)
} }

View File

@@ -265,6 +265,146 @@ var _ = Describe("Compiler", func() {
Expect(helpers.Exists(spec.Rel("test6"))).ToNot(BeTrue()) Expect(helpers.Exists(spec.Rel("test6"))).ToNot(BeTrue())
}) })
It("Compiles and excludes files", func() {
generalRecipe := tree.NewCompilerRecipe(pkg.NewInMemoryDatabase(false))
tmpdir, err := ioutil.TempDir("", "package")
Expect(err).ToNot(HaveOccurred())
defer os.RemoveAll(tmpdir) // clean up
err = generalRecipe.Load("../../tests/fixtures/excludes")
Expect(err).ToNot(HaveOccurred())
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(1))
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), NewDefaultCompilerOptions(), solver.Options{Type: solver.SingleCoreSimple})
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
Expect(err).ToNot(HaveOccurred())
// err = generalRecipe.Tree().ResolveDeps(3)
// Expect(err).ToNot(HaveOccurred())
spec.SetOutputPath(tmpdir)
compiler.SetConcurrency(1)
artifacts, errs := compiler.CompileParallel(false, NewLuetCompilationspecs(spec))
Expect(errs).To(BeNil())
Expect(len(artifacts)).To(Equal(1))
for _, artifact := range artifacts {
Expect(helpers.Exists(artifact.GetPath())).To(BeTrue())
Expect(helpers.Untar(artifact.GetPath(), tmpdir, false)).ToNot(HaveOccurred())
}
Expect(helpers.Exists(spec.Rel("test5"))).To(BeTrue())
Expect(helpers.Exists(spec.Rel("marvin"))).To(BeTrue())
Expect(helpers.Exists(spec.Rel("marvot"))).ToNot(BeTrue())
Expect(helpers.Exists(spec.Rel("test6"))).To(BeTrue())
})
It("Compiles includes and excludes files", func() {
	// Build package test/b from the excludesincludes fixture and verify
	// that both the includes and the excludes filters are applied to the
	// content of the produced artifact.
	recipe := tree.NewCompilerRecipe(pkg.NewInMemoryDatabase(false))

	tmpdir, err := ioutil.TempDir("", "package")
	Expect(err).ToNot(HaveOccurred())
	defer os.RemoveAll(tmpdir) // clean up

	err = recipe.Load("../../tests/fixtures/excludesincludes")
	Expect(err).ToNot(HaveOccurred())
	Expect(len(recipe.GetDatabase().GetPackages())).To(Equal(1))

	c := NewLuetCompiler(sd.NewSimpleDockerBackend(), recipe.GetDatabase(), NewDefaultCompilerOptions(), solver.Options{Type: solver.SingleCoreSimple})

	spec, err := c.FromPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
	Expect(err).ToNot(HaveOccurred())

	spec.SetOutputPath(tmpdir)
	c.SetConcurrency(1)

	artifacts, errs := c.CompileParallel(false, NewLuetCompilationspecs(spec))
	Expect(errs).To(BeNil())
	Expect(len(artifacts)).To(Equal(1))

	// Unpack the compiled artifact so its file list can be inspected.
	for _, a := range artifacts {
		Expect(helpers.Exists(a.GetPath())).To(BeTrue())
		Expect(helpers.Untar(a.GetPath(), tmpdir, false)).ToNot(HaveOccurred())
	}

	// Files kept by includes and not matched by excludes must survive;
	// everything else must be absent from the unpacked tree.
	Expect(helpers.Exists(spec.Rel("test5"))).To(BeTrue())
	Expect(helpers.Exists(spec.Rel("marvin"))).To(BeTrue())
	Expect(helpers.Exists(spec.Rel("marvot"))).ToNot(BeTrue())
	Expect(helpers.Exists(spec.Rel("test6"))).ToNot(BeTrue())
})
It("Compiles and excludes only wanted files also from unpacked packages", func() {
	// Build package test/b from the excludeimage fixture (an unpacked
	// image-based package, unpack: true) and verify that the excludes
	// filter is applied to files coming from the unpacked image as well.
	// NOTE: the spec description previously read "ony"; fixed to "only".
	generalRecipe := tree.NewCompilerRecipe(pkg.NewInMemoryDatabase(false))

	tmpdir, err := ioutil.TempDir("", "package")
	Expect(err).ToNot(HaveOccurred())
	defer os.RemoveAll(tmpdir) // clean up

	err = generalRecipe.Load("../../tests/fixtures/excludeimage")
	Expect(err).ToNot(HaveOccurred())
	// The fixture ships two packages: the layer/seed and test/b.
	Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(2))

	compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), NewDefaultCompilerOptions(), solver.Options{Type: solver.SingleCoreSimple})

	spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
	Expect(err).ToNot(HaveOccurred())

	spec.SetOutputPath(tmpdir)
	compiler.SetConcurrency(1)

	artifacts, errs := compiler.CompileParallel(false, NewLuetCompilationspecs(spec))
	Expect(errs).To(BeNil())
	Expect(len(artifacts)).To(Equal(1))

	// Unpack the compiled artifact so its file list can be inspected.
	for _, artifact := range artifacts {
		Expect(helpers.Exists(artifact.GetPath())).To(BeTrue())
		Expect(helpers.Untar(artifact.GetPath(), tmpdir, false)).ToNot(HaveOccurred())
	}

	// "marvin" is excluded by the fixture; the remaining files survive.
	Expect(helpers.Exists(spec.Rel("marvin"))).ToNot(BeTrue())
	Expect(helpers.Exists(spec.Rel("test5"))).To(BeTrue())
	Expect(helpers.Exists(spec.Rel("test6"))).To(BeTrue())
})
It("Compiles includes and excludes only wanted files also from unpacked packages", func() {
	// Build package test/b from the excludeincludeimage fixture (an
	// unpacked image-based package) and verify that includes and excludes
	// are both honored for files coming from the unpacked image.
	// NOTE: the spec description previously read "ony"; fixed to "only".
	generalRecipe := tree.NewCompilerRecipe(pkg.NewInMemoryDatabase(false))

	tmpdir, err := ioutil.TempDir("", "package")
	Expect(err).ToNot(HaveOccurred())
	defer os.RemoveAll(tmpdir) // clean up

	err = generalRecipe.Load("../../tests/fixtures/excludeincludeimage")
	Expect(err).ToNot(HaveOccurred())
	// The fixture ships two packages: the layer/seed and test/b.
	Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(2))

	compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), NewDefaultCompilerOptions(), solver.Options{Type: solver.SingleCoreSimple})

	spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
	Expect(err).ToNot(HaveOccurred())

	spec.SetOutputPath(tmpdir)
	compiler.SetConcurrency(1)

	artifacts, errs := compiler.CompileParallel(false, NewLuetCompilationspecs(spec))
	Expect(errs).To(BeNil())
	Expect(len(artifacts)).To(Equal(1))

	// Unpack the compiled artifact so its file list can be inspected.
	for _, artifact := range artifacts {
		Expect(helpers.Exists(artifact.GetPath())).To(BeTrue())
		Expect(helpers.Untar(artifact.GetPath(), tmpdir, false)).ToNot(HaveOccurred())
	}

	// "marvin" matches the includes but is excluded; test5/test6 remain.
	Expect(helpers.Exists(spec.Rel("marvin"))).ToNot(BeTrue())
	Expect(helpers.Exists(spec.Rel("test5"))).To(BeTrue())
	Expect(helpers.Exists(spec.Rel("test6"))).To(BeTrue())
})
It("Compiles and includes ony wanted files also from unpacked packages", func() { It("Compiles and includes ony wanted files also from unpacked packages", func() {
generalRecipe := tree.NewCompilerRecipe(pkg.NewInMemoryDatabase(false)) generalRecipe := tree.NewCompilerRecipe(pkg.NewInMemoryDatabase(false))
tmpdir, err := ioutil.TempDir("", "package") tmpdir, err := ioutil.TempDir("", "package")

View File

@@ -147,6 +147,7 @@ type ArtifactLayersSummary struct {
type CompilationSpec interface { type CompilationSpec interface {
ImageUnpack() bool // tells if the definition is just an image ImageUnpack() bool // tells if the definition is just an image
GetIncludes() []string GetIncludes() []string
GetExcludes() []string
RenderBuildImage() (string, error) RenderBuildImage() (string, error)
WriteBuildImageDefinition(string) error WriteBuildImageDefinition(string) error

View File

@@ -102,6 +102,7 @@ type LuetCompilationSpec struct {
OutputPath string `json:"-"` // Where the build processfiles go OutputPath string `json:"-"` // Where the build processfiles go
Unpack bool `json:"unpack"` Unpack bool `json:"unpack"`
Includes []string `json:"includes"` Includes []string `json:"includes"`
Excludes []string `json:"excludes"`
} }
func NewLuetCompilationSpec(b []byte, p pkg.Package) (CompilationSpec, error) { func NewLuetCompilationSpec(b []byte, p pkg.Package) (CompilationSpec, error) {
@@ -148,6 +149,10 @@ func (cs *LuetCompilationSpec) GetIncludes() []string {
return cs.Includes return cs.Includes
} }
func (cs *LuetCompilationSpec) GetExcludes() []string {
return cs.Excludes
}
func (cs *LuetCompilationSpec) GetRetrieve() []string { func (cs *LuetCompilationSpec) GetRetrieve() []string {
return cs.Retrieve return cs.Retrieve
} }

View File

@@ -97,7 +97,7 @@ type LuetSystemConfig struct {
TmpDirBase string `yaml:"tmpdir_base" mapstructure:"tmpdir_base"` TmpDirBase string `yaml:"tmpdir_base" mapstructure:"tmpdir_base"`
} }
func (sc LuetSystemConfig) GetRepoDatabaseDirPath(name string) string { func (sc *LuetSystemConfig) GetRepoDatabaseDirPath(name string) string {
dbpath := filepath.Join(sc.Rootfs, sc.DatabasePath) dbpath := filepath.Join(sc.Rootfs, sc.DatabasePath)
dbpath = filepath.Join(dbpath, "repos/"+name) dbpath = filepath.Join(dbpath, "repos/"+name)
err := os.MkdirAll(dbpath, os.ModePerm) err := os.MkdirAll(dbpath, os.ModePerm)
@@ -107,7 +107,7 @@ func (sc LuetSystemConfig) GetRepoDatabaseDirPath(name string) string {
return dbpath return dbpath
} }
func (sc LuetSystemConfig) GetSystemRepoDatabaseDirPath() string { func (sc *LuetSystemConfig) GetSystemRepoDatabaseDirPath() string {
dbpath := filepath.Join(sc.Rootfs, dbpath := filepath.Join(sc.Rootfs,
sc.DatabasePath) sc.DatabasePath)
err := os.MkdirAll(dbpath, os.ModePerm) err := os.MkdirAll(dbpath, os.ModePerm)
@@ -117,7 +117,7 @@ func (sc LuetSystemConfig) GetSystemRepoDatabaseDirPath() string {
return dbpath return dbpath
} }
func (sc LuetSystemConfig) GetSystemPkgsCacheDirPath() (ans string) { func (sc *LuetSystemConfig) GetSystemPkgsCacheDirPath() (ans string) {
var cachepath string var cachepath string
if sc.PkgsCachePath != "" { if sc.PkgsCachePath != "" {
cachepath = sc.PkgsCachePath cachepath = sc.PkgsCachePath
@@ -135,6 +135,10 @@ func (sc LuetSystemConfig) GetSystemPkgsCacheDirPath() (ans string) {
return return
} }
func (sc *LuetSystemConfig) GetRootFsAbs() (string, error) {
return filepath.Abs(sc.Rootfs)
}
type LuetRepository struct { type LuetRepository struct {
Name string `json:"name" yaml:"name" mapstructure:"name"` Name string `json:"name" yaml:"name" mapstructure:"name"`
Description string `json:"description,omitempty" yaml:"description,omitempty" mapstructure:"description"` Description string `json:"description,omitempty" yaml:"description,omitempty" mapstructure:"description"`
@@ -204,6 +208,7 @@ type LuetConfig struct {
RepositoriesConfDir []string `mapstructure:"repos_confdir"` RepositoriesConfDir []string `mapstructure:"repos_confdir"`
ConfigProtectConfDir []string `mapstructure:"config_protect_confdir"` ConfigProtectConfDir []string `mapstructure:"config_protect_confdir"`
ConfigProtectSkip bool `mapstructure:"config_protect_skip"` ConfigProtectSkip bool `mapstructure:"config_protect_skip"`
ConfigFromHost bool `mapstructure:"config_from_host"`
CacheRepositories []LuetRepository `mapstructure:"repetitors"` CacheRepositories []LuetRepository `mapstructure:"repetitors"`
SystemRepositories []LuetRepository `mapstructure:"repositories"` SystemRepositories []LuetRepository `mapstructure:"repositories"`
@@ -251,6 +256,8 @@ func GenDefault(viper *v.Viper) {
viper.SetDefault("repos_confdir", []string{"/etc/luet/repos.conf.d"}) viper.SetDefault("repos_confdir", []string{"/etc/luet/repos.conf.d"})
viper.SetDefault("config_protect_confdir", []string{"/etc/luet/config.protect.d"}) viper.SetDefault("config_protect_confdir", []string{"/etc/luet/config.protect.d"})
viper.SetDefault("config_protect_skip", false) viper.SetDefault("config_protect_skip", false)
// TODO: Set default to false when we are ready for migration.
viper.SetDefault("config_from_host", true)
viper.SetDefault("cache_repositories", []string{}) viper.SetDefault("cache_repositories", []string{})
viper.SetDefault("system_repositories", []string{}) viper.SetDefault("system_repositories", []string{})

View File

@@ -24,6 +24,37 @@ import (
copy "github.com/otiai10/copy" copy "github.com/otiai10/copy"
) )
func OrderFiles(target string, files []string) ([]string, []string) {
var newFiles []string
var notPresent []string
for _, f := range files {
target := filepath.Join(target, f)
fi, err := os.Lstat(target)
if err != nil {
notPresent = append(notPresent, f)
continue
}
if m := fi.Mode(); !m.IsDir() {
newFiles = append(newFiles, f)
}
}
for _, f := range files {
target := filepath.Join(target, f)
fi, err := os.Lstat(target)
if err != nil {
continue
}
if m := fi.Mode(); m.IsDir() {
newFiles = append(newFiles, f)
}
}
return newFiles, notPresent
}
func ListDir(dir string) ([]string, error) { func ListDir(dir string) ([]string, error) {
content := []string{} content := []string{}

View File

@@ -16,6 +16,10 @@
package helpers_test package helpers_test
import ( import (
"io/ioutil"
"os"
"path/filepath"
. "github.com/mudler/luet/pkg/helpers" . "github.com/mudler/luet/pkg/helpers"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
@@ -28,4 +32,33 @@ var _ = Describe("Helpers", func() {
Expect(Exists("../../tests/fixtures/buildtree/app-admin/enman/1.4.0/build.yaml.not.exists")).To(BeFalse()) Expect(Exists("../../tests/fixtures/buildtree/app-admin/enman/1.4.0/build.yaml.not.exists")).To(BeFalse())
}) })
}) })
Context("Orders dir and files correctly", func() {
It("puts files first and folders at end", func() {
testDir, err := ioutil.TempDir(os.TempDir(), "test")
Expect(err).ToNot(HaveOccurred())
defer os.RemoveAll(testDir)
err = ioutil.WriteFile(filepath.Join(testDir, "foo"), []byte("test\n"), 0644)
Expect(err).ToNot(HaveOccurred())
err = ioutil.WriteFile(filepath.Join(testDir, "baz"), []byte("test\n"), 0644)
Expect(err).ToNot(HaveOccurred())
err = os.MkdirAll(filepath.Join(testDir, "bar"), 0755)
Expect(err).ToNot(HaveOccurred())
err = ioutil.WriteFile(filepath.Join(testDir, "bar", "foo"), []byte("test\n"), 0644)
Expect(err).ToNot(HaveOccurred())
err = os.MkdirAll(filepath.Join(testDir, "baz2"), 0755)
Expect(err).ToNot(HaveOccurred())
err = ioutil.WriteFile(filepath.Join(testDir, "baz2", "foo"), []byte("test\n"), 0644)
Expect(err).ToNot(HaveOccurred())
ordered, notExisting := OrderFiles(testDir, []string{"bar", "baz", "bar/foo", "baz2", "foo", "baz2/foo", "notexisting"})
Expect(ordered).To(Equal([]string{"baz", "bar/foo", "foo", "baz2/foo", "bar", "baz2"}))
Expect(notExisting).To(Equal([]string{"notexisting"}))
})
})
}) })

View File

@@ -1,7 +1,10 @@
package helpers package helpers
import ( import (
"io/ioutil"
"github.com/pkg/errors" "github.com/pkg/errors"
"gopkg.in/yaml.v2"
"helm.sh/helm/v3/pkg/chart" "helm.sh/helm/v3/pkg/chart"
"helm.sh/helm/v3/pkg/chartutil" "helm.sh/helm/v3/pkg/chartutil"
"helm.sh/helm/v3/pkg/engine" "helm.sh/helm/v3/pkg/engine"
@@ -31,3 +34,26 @@ func RenderHelm(template string, values map[string]interface{}) (string, error)
return out["templates"], nil return out["templates"], nil
} }
type templatedata map[string]interface{}
func RenderFiles(toTemplate, valuesFile string) (string, error) {
raw, err := ioutil.ReadFile(toTemplate)
if err != nil {
return "", errors.Wrap(err, "reading file "+toTemplate)
}
if !Exists(valuesFile) {
return "", errors.Wrap(err, "file not existing "+valuesFile)
}
def, err := ioutil.ReadFile(valuesFile)
if err != nil {
return "", errors.Wrap(err, "reading file "+valuesFile)
}
var values templatedata
if err = yaml.Unmarshal(def, &values); err != nil {
return "", errors.Wrap(err, "unmarshalling file "+toTemplate)
}
return RenderHelm(string(raw), values)
}

View File

@@ -38,15 +38,26 @@ func NewLocalClient(r RepoData) *LocalClient {
func (c *LocalClient) DownloadArtifact(artifact compiler.Artifact) (compiler.Artifact, error) { func (c *LocalClient) DownloadArtifact(artifact compiler.Artifact) (compiler.Artifact, error) {
var err error var err error
rootfs := ""
artifactName := path.Base(artifact.GetPath()) artifactName := path.Base(artifact.GetPath())
cacheFile := filepath.Join(config.LuetCfg.GetSystem().GetSystemPkgsCacheDirPath(), artifactName) cacheFile := filepath.Join(config.LuetCfg.GetSystem().GetSystemPkgsCacheDirPath(), artifactName)
if !config.LuetCfg.ConfigFromHost {
rootfs, err = config.LuetCfg.GetSystem().GetRootFsAbs()
if err != nil {
return nil, err
}
}
// Check if file is already in cache // Check if file is already in cache
if helpers.Exists(cacheFile) { if helpers.Exists(cacheFile) {
Info("Use artifact", artifactName, "from cache.") Info("Use artifact", artifactName, "from cache.")
} else { } else {
ok := false ok := false
for _, uri := range c.RepoData.Urls { for _, uri := range c.RepoData.Urls {
uri = filepath.Join(rootfs, uri)
Info("Downloading artifact", artifactName, "from", uri) Info("Downloading artifact", artifactName, "from", uri)
//defer os.Remove(file.Name()) //defer os.Remove(file.Name())
@@ -72,8 +83,20 @@ func (c *LocalClient) DownloadFile(name string) (string, error) {
var err error var err error
var file *os.File = nil var file *os.File = nil
rootfs := ""
if !config.LuetCfg.ConfigFromHost {
rootfs, err = config.LuetCfg.GetSystem().GetRootFsAbs()
if err != nil {
return "", err
}
}
ok := false ok := false
for _, uri := range c.RepoData.Urls { for _, uri := range c.RepoData.Urls {
uri = filepath.Join(rootfs, uri)
Info("Downloading file", name, "from", uri) Info("Downloading file", name, "from", uri)
file, err = config.LuetCfg.GetSystem().TempFile("localclient") file, err = config.LuetCfg.GetSystem().TempFile("localclient")
if err != nil { if err != nil {

View File

@@ -19,6 +19,7 @@ package installer
import ( import (
"io/ioutil" "io/ioutil"
"path" "path"
"path/filepath"
"regexp" "regexp"
"github.com/ghodss/yaml" "github.com/ghodss/yaml"
@@ -29,8 +30,21 @@ import (
func LoadConfigProtectConfs(c *LuetConfig) error { func LoadConfigProtectConfs(c *LuetConfig) error {
var regexConfs = regexp.MustCompile(`.yml$`) var regexConfs = regexp.MustCompile(`.yml$`)
var err error
rootfs := ""
// Respect the rootfs param on read repositories
if !c.ConfigFromHost {
rootfs, err = c.GetSystem().GetRootFsAbs()
if err != nil {
return err
}
}
for _, cdir := range c.ConfigProtectConfDir { for _, cdir := range c.ConfigProtectConfDir {
cdir = filepath.Join(rootfs, cdir)
Debug("Parsing Config Protect Directory", cdir, "...") Debug("Parsing Config Protect Directory", cdir, "...")
files, err := ioutil.ReadDir(cdir) files, err := ioutil.ReadDir(cdir)

View File

@@ -30,7 +30,6 @@ import (
. "github.com/mudler/luet/pkg/logger" . "github.com/mudler/luet/pkg/logger"
pkg "github.com/mudler/luet/pkg/package" pkg "github.com/mudler/luet/pkg/package"
"github.com/mudler/luet/pkg/solver" "github.com/mudler/luet/pkg/solver"
"github.com/mudler/luet/pkg/tree"
"github.com/pkg/errors" "github.com/pkg/errors"
) )
@@ -453,8 +452,7 @@ func (l *LuetInstaller) install(syncedRepos Repositories, cp pkg.Packages, s *Sy
return errors.Wrap(err, "Failed creating package") return errors.Wrap(err, "Failed creating package")
} }
} }
executedFinalizer := map[string]bool{} var toFinalize []pkg.Package
if !l.Options.NoDeps { if !l.Options.NoDeps {
// TODO: Lower those errors as warning // TODO: Lower those errors as warning
for _, w := range p { for _, w := range p {
@@ -466,36 +464,17 @@ func (l *LuetInstaller) install(syncedRepos Repositories, cp pkg.Packages, s *Sy
ORDER: ORDER:
for _, ass := range ordered { for _, ass := range ordered {
if ass.Value { if ass.Value {
installed, ok := toInstall[ass.Package.GetFingerPrint()] installed, ok := toInstall[ass.Package.GetFingerPrint()]
if !ok { if !ok {
// It was a dep already installed in the system, so we can skip it safely // It was a dep already installed in the system, so we can skip it safely
continue ORDER continue ORDER
} }
treePackage, err := installed.Repository.GetTree().GetDatabase().FindPackage(ass.Package) treePackage, err := installed.Repository.GetTree().GetDatabase().FindPackage(ass.Package)
if err != nil { if err != nil {
return errors.Wrap(err, "Error getting package "+ass.Package.HumanReadableString()) return errors.Wrap(err, "Error getting package "+ass.Package.HumanReadableString())
} }
if helpers.Exists(treePackage.Rel(tree.FinalizerFile)) {
finalizerRaw, err := ioutil.ReadFile(treePackage.Rel(tree.FinalizerFile))
if err != nil && !l.Options.Force {
return errors.Wrap(err, "Error reading file "+treePackage.Rel(tree.FinalizerFile))
}
if _, exists := executedFinalizer[ass.Package.GetFingerPrint()]; !exists {
Info("Executing finalizer for " + ass.Package.HumanReadableString())
finalizer, err := NewLuetFinalizerFromYaml(finalizerRaw)
if err != nil && !l.Options.Force {
return errors.Wrap(err, "Error reading finalizer "+treePackage.Rel(tree.FinalizerFile))
}
err = finalizer.RunInstall(s)
if err != nil && !l.Options.Force {
return errors.Wrap(err, "Error executing install finalizer "+treePackage.Rel(tree.FinalizerFile))
}
executedFinalizer[ass.Package.GetFingerPrint()] = true
}
}
toFinalize = append(toFinalize, treePackage)
} }
} }
@@ -506,29 +485,11 @@ func (l *LuetInstaller) install(syncedRepos Repositories, cp pkg.Packages, s *Sy
if err != nil { if err != nil {
return errors.Wrap(err, "Error getting package "+c.Package.HumanReadableString()) return errors.Wrap(err, "Error getting package "+c.Package.HumanReadableString())
} }
if helpers.Exists(treePackage.Rel(tree.FinalizerFile)) { toFinalize = append(toFinalize, treePackage)
finalizerRaw, err := ioutil.ReadFile(treePackage.Rel(tree.FinalizerFile))
if err != nil && !l.Options.Force {
return errors.Wrap(err, "Error reading file "+treePackage.Rel(tree.FinalizerFile))
}
if _, exists := executedFinalizer[c.Package.GetFingerPrint()]; !exists {
Info(":shell: Executing finalizer for " + c.Package.HumanReadableString())
finalizer, err := NewLuetFinalizerFromYaml(finalizerRaw)
if err != nil && !l.Options.Force {
return errors.Wrap(err, "Error reading finalizer "+treePackage.Rel(tree.FinalizerFile))
}
err = finalizer.RunInstall(s)
if err != nil && !l.Options.Force {
return errors.Wrap(err, "Error executing install finalizer "+treePackage.Rel(tree.FinalizerFile))
}
executedFinalizer[c.Package.GetFingerPrint()] = true
}
}
} }
} }
return nil return s.ExecuteFinalizers(toFinalize, l.Options.Force)
} }
func (l *LuetInstaller) downloadPackage(a ArtifactMatch) (compiler.Artifact, error) { func (l *LuetInstaller) downloadPackage(a ArtifactMatch) (compiler.Artifact, error) {
@@ -631,8 +592,10 @@ func (l *LuetInstaller) uninstall(p pkg.Package, s *System) error {
cp.Map(files) cp.Map(files)
} }
toRemove, notPresent := helpers.OrderFiles(s.Target, files)
// Remove from target // Remove from target
for _, f := range files { for _, f := range toRemove {
target := filepath.Join(s.Target, f) target := filepath.Join(s.Target, f)
if !config.LuetCfg.ConfigProtectSkip && cp.Protected(f) { if !config.LuetCfg.ConfigProtectSkip && cp.Protected(f) {
@@ -650,10 +613,7 @@ func (l *LuetInstaller) uninstall(p pkg.Package, s *System) error {
fi, err := os.Lstat(target) fi, err := os.Lstat(target)
if err != nil { if err != nil {
Warning("File not present in the system target ?", target, err.Error()) Warning("File not found (it was before?) ", err.Error())
if err = os.Remove(target); err != nil {
Warning("Failed removing file", target, err.Error())
}
continue continue
} }
switch mode := fi.Mode(); { switch mode := fi.Mode(); {
@@ -663,15 +623,29 @@ func (l *LuetInstaller) uninstall(p pkg.Package, s *System) error {
Warning("Failed reading folder", target, err.Error()) Warning("Failed reading folder", target, err.Error())
} }
if len(files) != 0 { if len(files) != 0 {
Warning("Preserving not-empty folder", target, err.Error()) Debug("Preserving not-empty folder", target)
continue continue
} }
} }
if err = os.Remove(target); err != nil { if err = os.Remove(target); err != nil {
Warning("Failed removing file (not present in the system target ?)", target, err.Error()) Warning("Failed removing file (maybe not present in the system target anymore ?)", target, err.Error())
} }
} }
for _, f := range notPresent {
target := filepath.Join(s.Target, f)
if !config.LuetCfg.ConfigProtectSkip && cp.Protected(f) {
Debug("Preserving protected file:", f)
continue
}
if err = os.Remove(target); err != nil {
Debug("Failed removing file (not present in the system target)", target, err.Error())
}
}
err = s.Database.RemovePackageFiles(p) err = s.Database.RemovePackageFiles(p)
if err != nil { if err != nil {
return errors.Wrap(err, "Failed removing package files from database") return errors.Wrap(err, "Failed removing package files from database")

View File

@@ -1,7 +1,12 @@
package installer package installer
import ( import (
. "github.com/mudler/luet/pkg/logger"
"github.com/mudler/luet/pkg/helpers"
pkg "github.com/mudler/luet/pkg/package" pkg "github.com/mudler/luet/pkg/package"
"github.com/mudler/luet/pkg/tree"
"github.com/pkg/errors"
) )
type System struct { type System struct {
@@ -12,3 +17,31 @@ type System struct {
func (s *System) World() (pkg.Packages, error) { func (s *System) World() (pkg.Packages, error) {
return s.Database.World(), nil return s.Database.World(), nil
} }
type templatedata map[string]interface{}
func (s *System) ExecuteFinalizers(packs []pkg.Package, force bool) error {
executedFinalizer := map[string]bool{}
for _, p := range packs {
if helpers.Exists(p.Rel(tree.FinalizerFile)) {
out, err := helpers.RenderFiles(p.Rel(tree.FinalizerFile), p.Rel(tree.DefinitionFile))
if err != nil && !force {
return errors.Wrap(err, "reading file "+p.Rel(tree.FinalizerFile))
}
if _, exists := executedFinalizer[p.GetFingerPrint()]; !exists {
Info("Executing finalizer for " + p.HumanReadableString())
finalizer, err := NewLuetFinalizerFromYaml([]byte(out))
if err != nil && !force {
return errors.Wrap(err, "Error reading finalizer "+p.Rel(tree.FinalizerFile))
}
err = finalizer.RunInstall(s)
if err != nil && !force {
return errors.Wrap(err, "Error executing install finalizer "+p.Rel(tree.FinalizerFile))
}
executedFinalizer[p.GetFingerPrint()] = true
}
}
}
return nil
}

View File

@@ -19,6 +19,7 @@ package repository
import ( import (
"io/ioutil" "io/ioutil"
"path" "path"
"path/filepath"
"regexp" "regexp"
"github.com/ghodss/yaml" "github.com/ghodss/yaml"
@@ -29,8 +30,21 @@ import (
func LoadRepositories(c *LuetConfig) error { func LoadRepositories(c *LuetConfig) error {
var regexRepo = regexp.MustCompile(`.yml$|.yaml$`) var regexRepo = regexp.MustCompile(`.yml$|.yaml$`)
var err error
rootfs := ""
// Respect the rootfs param on read repositories
if !c.ConfigFromHost {
rootfs, err = c.GetSystem().GetRootFsAbs()
if err != nil {
return err
}
}
for _, rdir := range c.RepositoriesConfDir { for _, rdir := range c.RepositoriesConfDir {
rdir = filepath.Join(rootfs, rdir)
Debug("Parsing Repository Directory", rdir, "...") Debug("Parsing Repository Directory", rdir, "...")
files, err := ioutil.ReadDir(rdir) files, err := ioutil.ReadDir(rdir)

14
tests/fixtures/excludeimage/build.yaml vendored Normal file
View File

@@ -0,0 +1,14 @@
requires:
- category: "layer"
name: "seed"
version: "1.0"
prelude:
- echo foo > /test
- echo bar > /test2
steps:
- echo artifact5 > /test5
- echo artifact6 > /test6
- echo artifact43 > /marvin
unpack: true
excludes:
- marvin

View File

@@ -0,0 +1,3 @@
category: "test"
name: "b"
version: "1.0"

View File

@@ -0,0 +1,2 @@
image: alpine
unpack: true

View File

@@ -0,0 +1,3 @@
category: "layer"
name: "seed"
version: "1.0"

View File

@@ -0,0 +1,17 @@
requires:
- category: "layer"
name: "seed"
version: "1.0"
prelude:
- echo foo > /test
- echo bar > /test2
steps:
- echo artifact5 > /test5
- echo artifact6 > /test6
- echo artifact43 > /marvin
unpack: true
excludes:
- marvin
includes:
- test.*
- mar.*

View File

@@ -0,0 +1,3 @@
category: "test"
name: "b"
version: "1.0"

View File

@@ -0,0 +1,2 @@
image: alpine
unpack: true

View File

@@ -0,0 +1,3 @@
category: "layer"
name: "seed"
version: "1.0"

11
tests/fixtures/excludes/build.yaml vendored Normal file
View File

@@ -0,0 +1,11 @@
image: "alpine"
prelude:
- echo foo > /test
- echo bar > /test2
steps:
- echo artifact5 > /test5
- echo artifact6 > /test6
- echo artifact43 > /marvin
- echo "foo" > /marvot
excludes:
- marvot

View File

@@ -0,0 +1,3 @@
category: "test"
name: "b"
version: "1.0"

View File

@@ -0,0 +1,14 @@
image: "alpine"
prelude:
- echo foo > /test
- echo bar > /test2
steps:
- echo artifact5 > /test5
- echo artifact6 > /test6
- echo artifact43 > /marvin
- echo "foo" > /marvot
excludes:
- marvot
includes:
- /test5
- mar.*

View File

@@ -0,0 +1,3 @@
category: "test"
name: "b"
version: "1.0"

View File

@@ -0,0 +1,2 @@
image: "alpine"
unpack: true

View File

@@ -0,0 +1,3 @@
category: "seed"
name: "alpine"
version: "1.0"

View File

@@ -0,0 +1,2 @@
install:
- echo "{{.Values.name}}" > /tmp/foo

View File

@@ -43,6 +43,7 @@ system:
rootfs: $tmpdir/testrootfs rootfs: $tmpdir/testrootfs
database_path: "/" database_path: "/"
database_engine: "boltdb" database_engine: "boltdb"
config_from_host: true
repositories: repositories:
- name: "main" - name: "main"
type: "disk" type: "disk"

View File

@@ -48,6 +48,7 @@ system:
rootfs: $tmpdir/testrootfs rootfs: $tmpdir/testrootfs
database_path: "/" database_path: "/"
database_engine: "boltdb" database_engine: "boltdb"
config_from_host: true
repositories: repositories:
- name: "main" - name: "main"
type: "disk" type: "disk"

View File

@@ -51,6 +51,7 @@ system:
rootfs: $tmpdir/testrootfs rootfs: $tmpdir/testrootfs
database_path: "/" database_path: "/"
database_engine: "boltdb" database_engine: "boltdb"
config_from_host: true
repositories: repositories:
- name: "main" - name: "main"
type: "disk" type: "disk"

View File

@@ -28,6 +28,7 @@ system:
rootfs: $tmpdir/testrootfs rootfs: $tmpdir/testrootfs
database_path: "/" database_path: "/"
database_engine: "boltdb" database_engine: "boltdb"
config_from_host: true
repositories: repositories:
- name: "main" - name: "main"
type: "disk" type: "disk"

View File

@@ -43,6 +43,7 @@ system:
rootfs: $tmpdir/testrootfs rootfs: $tmpdir/testrootfs
database_path: "/" database_path: "/"
database_engine: "boltdb" database_engine: "boltdb"
config_from_host: true
repositories: repositories:
- name: "main" - name: "main"
type: "disk" type: "disk"

View File

@@ -43,6 +43,7 @@ system:
rootfs: $tmpdir/testrootfs rootfs: $tmpdir/testrootfs
database_path: "/" database_path: "/"
database_engine: "boltdb" database_engine: "boltdb"
config_from_host: true
repositories: repositories:
- name: "main" - name: "main"
type: "disk" type: "disk"

View File

@@ -71,6 +71,7 @@ system:
rootfs: $tmpdir/testrootfs rootfs: $tmpdir/testrootfs
database_path: "/" database_path: "/"
database_engine: "boltdb" database_engine: "boltdb"
config_from_host: true
repositories: repositories:
- name: "main" - name: "main"
type: "disk" type: "disk"

View File

@@ -70,6 +70,7 @@ system:
rootfs: $tmpdir/testrootfs rootfs: $tmpdir/testrootfs
database_path: "/" database_path: "/"
database_engine: "boltdb" database_engine: "boltdb"
config_from_host: true
repositories: repositories:
- name: "main" - name: "main"
type: "disk" type: "disk"

View File

@@ -42,6 +42,7 @@ system:
rootfs: $tmpdir/testrootfs rootfs: $tmpdir/testrootfs
database_path: "/" database_path: "/"
database_engine: "boltdb" database_engine: "boltdb"
config_from_host: true
repositories: repositories:
- name: "main" - name: "main"
type: "disk" type: "disk"

View File

@@ -42,6 +42,7 @@ system:
rootfs: $tmpdir/testrootfs rootfs: $tmpdir/testrootfs
database_path: "/" database_path: "/"
database_engine: "boltdb" database_engine: "boltdb"
config_from_host: true
repositories: repositories:
- name: "main" - name: "main"
type: "disk" type: "disk"

View File

@@ -50,6 +50,7 @@ system:
rootfs: $tmpdir/testrootfs rootfs: $tmpdir/testrootfs
database_path: "/" database_path: "/"
database_engine: "boltdb" database_engine: "boltdb"
config_from_host: true
repositories: repositories:
- name: "main" - name: "main"
type: "disk" type: "disk"

View File

@@ -42,6 +42,7 @@ system:
rootfs: / rootfs: /
database_path: "/" database_path: "/"
database_engine: "boltdb" database_engine: "boltdb"
config_from_host: true
repositories: repositories:
- name: "main" - name: "main"
type: "disk" type: "disk"

View File

@@ -44,6 +44,7 @@ system:
rootfs: $tmpdir/testrootfs rootfs: $tmpdir/testrootfs
database_path: "/" database_path: "/"
database_engine: "boltdb" database_engine: "boltdb"
config_from_host: true
repositories: repositories:
- name: "main" - name: "main"
type: "disk" type: "disk"

View File

@@ -71,6 +71,7 @@ system:
rootfs: $tmpdir/testrootfs rootfs: $tmpdir/testrootfs
database_path: "/" database_path: "/"
database_engine: "boltdb" database_engine: "boltdb"
config_from_host: true
repositories: repositories:
- name: "main" - name: "main"
type: "disk" type: "disk"

View File

@@ -11,18 +11,19 @@ oneTimeTearDown() {
} }
testBuild() { testBuild() {
mkdir $tmpdir/testbuild mkdir $tmpdir/testrootfs/testbuild -p
luet build --tree "$ROOT_DIR/tests/fixtures/config_protect" --destination $tmpdir/testbuild --compression gzip test/a luet build --tree "$ROOT_DIR/tests/fixtures/config_protect" \
--destination $tmpdir/testrootfs/testbuild --compression gzip test/a
buildst=$? buildst=$?
assertEquals 'builds successfully' "$buildst" "0" assertEquals 'builds successfully' "$buildst" "0"
assertTrue 'create package' "[ -e '$tmpdir/testbuild/a-test-1.0.package.tar.gz' ]" assertTrue 'create package' "[ -e '$tmpdir/testrootfs/testbuild/a-test-1.0.package.tar.gz' ]"
} }
testRepo() { testRepo() {
assertTrue 'no repository' "[ ! -e '$tmpdir/testbuild/repository.yaml' ]" assertTrue 'no repository' "[ ! -e '$tmpdir/testbuild/repository.yaml' ]"
luet create-repo --tree "$ROOT_DIR/tests/fixtures/config_protect" \ luet create-repo --tree "$ROOT_DIR/tests/fixtures/config_protect" \
--output $tmpdir/testbuild \ --output $tmpdir/testrootfs/testbuild \
--packages $tmpdir/testbuild \ --packages $tmpdir/testrootfs/testbuild \
--name "test" \ --name "test" \
--descr "Test Repo" \ --descr "Test Repo" \
--urls $tmpdir/testrootfs \ --urls $tmpdir/testrootfs \
@@ -30,15 +31,14 @@ testRepo() {
createst=$? createst=$?
assertEquals 'create repo successfully' "$createst" "0" assertEquals 'create repo successfully' "$createst" "0"
assertTrue 'create repository' "[ -e '$tmpdir/testbuild/repository.yaml' ]" assertTrue 'create repository' "[ -e '$tmpdir/testrootfs/testbuild/repository.yaml' ]"
} }
testConfig() { testConfig() {
mkdir $tmpdir/testrootfs
mkdir $tmpdir/config.protect.d mkdir $tmpdir/testrootfs/etc/luet/config.protect.d -p
cat <<EOF > $tmpdir/config.protect.d/conf1.yml cat <<EOF > $tmpdir/testrootfs/etc/luet/config.protect.d/conf1.yml
name: "protect1" name: "protect1"
dirs: dirs:
- /etc/ - /etc/
@@ -52,13 +52,14 @@ system:
database_path: "/" database_path: "/"
database_engine: "boltdb" database_engine: "boltdb"
config_protect_confdir: config_protect_confdir:
- $tmpdir/config.protect.d - /etc/luet/config.protect.d
config_from_host: false
repositories: repositories:
- name: "main" - name: "main"
type: "disk" type: "disk"
enable: true enable: true
urls: urls:
- "$tmpdir/testbuild" - "/testbuild"
EOF EOF
luet config --config $tmpdir/luet.yaml luet config --config $tmpdir/luet.yaml
res=$? res=$?

View File

@@ -54,6 +54,7 @@ system:
database_engine: "boltdb" database_engine: "boltdb"
config_protect_confdir: config_protect_confdir:
- $tmpdir/config.protect.d - $tmpdir/config.protect.d
config_from_host: true
repositories: repositories:
- name: "main" - name: "main"
type: "disk" type: "disk"

View File

@@ -61,6 +61,7 @@ system:
rootfs: $tmpdir/testrootfs rootfs: $tmpdir/testrootfs
database_path: "/" database_path: "/"
database_engine: "boltdb" database_engine: "boltdb"
config_from_host: true
repositories: repositories:
- name: "main" - name: "main"
type: "disk" type: "disk"

View File

@@ -42,6 +42,7 @@ system:
rootfs: $tmpdir/testrootfs rootfs: $tmpdir/testrootfs
database_path: "/" database_path: "/"
database_engine: "boltdb" database_engine: "boltdb"
config_from_host: true
repositories: repositories:
- name: "main" - name: "main"
type: "disk" type: "disk"

View File

@@ -42,6 +42,7 @@ system:
rootfs: $tmpdir/testrootfs rootfs: $tmpdir/testrootfs
database_path: "/" database_path: "/"
database_engine: "boltdb" database_engine: "boltdb"
config_from_host: true
repositories: repositories:
- name: "main" - name: "main"
type: "disk" type: "disk"

View File

@@ -52,6 +52,7 @@ system:
database_path: "/" database_path: "/"
database_engine: "boltdb" database_engine: "boltdb"
config_protect_skip: true config_protect_skip: true
config_from_host: true
config_protect_confdir: config_protect_confdir:
- $tmpdir/config.protect.d - $tmpdir/config.protect.d
repositories: repositories:

View File

@@ -43,6 +43,7 @@ system:
rootfs: $tmpdir/testrootfs rootfs: $tmpdir/testrootfs
database_path: "/" database_path: "/"
database_engine: "boltdb" database_engine: "boltdb"
config_from_host: true
repositories: repositories:
- name: "main" - name: "main"
type: "disk" type: "disk"

View File

@@ -0,0 +1,77 @@
#!/bin/bash
export LUET_NOLOCK=true
# shunit2 hook: runs once before any test. Creates the scratch
# directory every test in this file works inside.
oneTimeSetUp() {
  tmpdir="$(mktemp -d)"
  export tmpdir
}
# shunit2 hook: runs once after all tests. Removes the scratch
# directory created in oneTimeSetUp.
# Guard against an empty $tmpdir (e.g. mktemp failure) so we never
# run an unanchored recursive rm.
oneTimeTearDown() {
  if [ -n "$tmpdir" ]; then
    rm -rf "$tmpdir"
  fi
}
# Build every package in the templatedfinalizers fixture tree into a
# local destination, gzip-compressed, and assert the expected artifact
# exists. All $tmpdir expansions are quoted to survive paths with
# spaces or glob characters.
testBuild() {
  mkdir "$tmpdir/testbuild"
  luet build --tree "$ROOT_DIR/tests/fixtures/templatedfinalizers" \
    --destination "$tmpdir/testbuild" --compression gzip --all > /dev/null
  buildst=$?
  assertEquals 'builds successfully' "$buildst" "0"
  assertTrue 'create package' "[ -e '$tmpdir/testbuild/alpine-seed-1.0.package.tar.gz' ]"
}
# Generate a disk-type repository from the packages built by testBuild.
# Verifies repository.yaml does not exist beforehand and does exist
# afterwards. Expansions are quoted against word splitting.
# NOTE(review): --urls points at $tmpdir/testrootfs while the packages
# live in $tmpdir/testbuild; testConfig's repository url uses testbuild,
# so installs still resolve — confirm the --urls value is intentional.
testRepo() {
  assertTrue 'no repository' "[ ! -e '$tmpdir/testbuild/repository.yaml' ]"
  luet create-repo --tree "$ROOT_DIR/tests/fixtures/templatedfinalizers" \
    --output "$tmpdir/testbuild" \
    --packages "$tmpdir/testbuild" \
    --name "test" \
    --descr "Test Repo" \
    --urls "$tmpdir/testrootfs" \
    --type disk > /dev/null
  createst=$?
  assertEquals 'create repo successfully' "$createst" "0"
  assertTrue 'create repository' "[ -e '$tmpdir/testbuild/repository.yaml' ]"
}
# Write a luet client configuration targeting a throwaway rootfs and
# validate it with `luet config`. The heredoc is intentionally
# unquoted (<<EOF) so $tmpdir expands into the generated YAML.
# config_from_host: true keeps config lookups on the host rather than
# inside the rootfs.
testConfig() {
  mkdir "$tmpdir/testrootfs"
  cat <<EOF > "$tmpdir/luet.yaml"
general:
  debug: true
system:
  rootfs: $tmpdir/testrootfs
  database_path: "/"
  database_engine: "boltdb"
config_from_host: true
repositories:
   - name: "main"
     type: "disk"
     enable: true
     urls:
       - "$tmpdir/testbuild"
EOF
  luet config --config "$tmpdir/luet.yaml"
  res=$?
  assertEquals 'config test successfully' "$res" "0"
}
# Install seed/alpine into the test rootfs and verify:
#  - the package's payload landed (/bin/busybox),
#  - its finalizer executed (/tmp/foo exists),
#  - the templated finalizer rendered the distro name ('alpine').
testInstall() {
  luet install --config "$tmpdir/luet.yaml" seed/alpine
  #luet install --config $tmpdir/luet.yaml test/c-1.0 > /dev/null
  installst=$?
  assertEquals 'install test successfully' "$installst" "0"
  assertTrue 'package installed' "[ -e '$tmpdir/testrootfs/bin/busybox' ]"
  assertTrue 'finalizer runs' "[ -e '$tmpdir/testrootfs/tmp/foo' ]"
  assertEquals 'finalizer printed used shell' "$(cat "$tmpdir/testrootfs/tmp/foo")" 'alpine'
}
# Run `luet cleanup` against the test configuration and assert it
# exits successfully. The assertion label previously said
# 'install test successfully' (copy-paste from testInstall) — fixed
# to describe the cleanup step it actually checks.
testCleanup() {
  luet cleanup --config "$tmpdir/luet.yaml"
  cleanupst=$?
  assertEquals 'cleanup test successfully' "$cleanupst" "0"
}
# Load shUnit2.
. "$ROOT_DIR/tests/integration/shunit2"/shunit2