Mirror of https://github.com/mudler/luet.git (synced 2025-09-02 15:54:39 +00:00)
Compare commits
95 Commits
54be45dcff
413572a8e3
ecc41ce370
dd6501a642
a7b355ed2f
9f3e7fd0b2
ac3843e342
612477718e
802b0b5201
c5587f9dfc
c27d4d258e
9202bcbbbe
fadd5a10a3
d297b92483
7ba7add2a8
57c769b4a5
44cae094e8
c022c75239
3250d63072
b8961be793
036b5c08c6
c7b79bf630
83cb6a2804
182afa315a
ec19b34ca8
88307b1912
a83be204e8
b8352a81a2
ebf907fb45
f0a34f1cf0
4f1e4c0b41
c736c002af
2c32af2951
ce349ca1b7
662742851a
f8ef1c0889
23513f2c75
268239a561
f8989e464e
0028dd3a92
caa1cfad5c
39839edda9
ecd4be4ad3
675170939d
0f1acac89b
42f5210764
9eda81667b
94f692266c
c13a2174c4
f07cf6c245
515017cabd
0bfb33db92
c9c24dd174
194cfda8a4
233429bbeb
d84f6b31fd
98b01ce00b
749a4cb615
57e19c61e7
5ee1e28b9c
21bd76af9c
89bd7c2281
49d7efa9ea
b3e3abec8f
92e73051a0
fd7405c2cc
2448f3175e
101df40eec
c22adb3a47
b93357e36c
518fb16067
4d9297e3da
544895e051
fd80bb526e
c1fe3278fa
2854c68209
505f07f056
8bce3f1f00
18e9ce4557
9f73a334b3
4eab1eb738
685bbf46a6
d89225f37d
55d34a3b40
85b5c96bdd
6f5f400765
be87861657
76e5d37895
8aca246f51
be7b56bae3
eae2382764
76076c8f51
7d11df3225
0ae8cbb877
b9f0ef1c55
18  .github/workflows/release.yml  (vendored)

@@ -14,9 +14,21 @@ jobs:
- name: setup-docker
uses: docker-practice/actions-setup-docker@0.0.1
- name: Login to quay
run: echo ${{ secrets.DOCKER_TESTING_PASSWORD }} | sudo docker login -u ${{ secrets.DOCKER_TESTING_USERNAME }} --password-stdin quay.io
run: echo ${{ secrets.DOCKER_TESTING_PASSWORD }} | sudo -E docker login -u ${{ secrets.DOCKER_TESTING_USERNAME }} --password-stdin quay.io
- name: Install deps
run: sudo apt-get install -y upx
run: |
sudo apt-get install -y upx && sudo -E env "PATH=$PATH" make deps
sudo curl -fSL "https://github.com/genuinetools/img/releases/download/v0.5.11/img-linux-amd64" -o "/usr/bin/img"
sudo chmod a+x "/usr/bin/img"
- name: Build test
run: sudo -E env "PATH=$PATH" make multiarch-build-small
- name: Login to quay with img
run: echo ${{ secrets.DOCKER_TESTING_PASSWORD }} | sudo img login -u ${{ secrets.DOCKER_TESTING_USERNAME }} --password-stdin quay.io
- name: Tests with Img backend
run: |
sudo -E env "PATH=$PATH" \
env "LUET_BACKEND=img" \
make test-integration
- name: Tests
run: |
sudo -E \

@@ -24,7 +36,7 @@ jobs:
env "TEST_DOCKER_IMAGE=${{ secrets.DOCKER_TESTING_IMAGE }}" \
env "UNIT_TEST_DOCKER_IMAGE=${{ secrets.DOCKER_TESTING_IMAGE }}" \
env "UNIT_TEST_DOCKER_IMAGE_REPOSITORY=${{ secrets.DOCKER_TESTING_UNIT_TEST_IMAGE }}" \
make deps multiarch-build-small test-integration test-coverage
make test-integration test-coverage
- name: Build
run: sudo -E env "PATH=$PATH" make multiarch-build-small && sudo chmod -R 777 release/
- name: Release
11  .github/workflows/test.yml  (vendored)

@@ -18,6 +18,13 @@ jobs:
- name: setup-docker
uses: docker-practice/actions-setup-docker@0.0.1
- name: Install deps
run: sudo apt-get install -y upx
run: |
sudo apt-get install -y upx && sudo -E env "PATH=$PATH" make deps
sudo curl -fSL "https://github.com/genuinetools/img/releases/download/v0.5.11/img-linux-amd64" -o "/usr/bin/img"
sudo chmod a+x "/usr/bin/img"
- name: Build
run: sudo -E env "PATH=$PATH" make multiarch-build-small
- name: Tests with Img backend
run: sudo -E env "PATH=$PATH" env "LUET_BACKEND=img" make test-integration
- name: Tests
run: sudo -E env "PATH=$PATH" make deps multiarch-build-small test-integration test-coverage
run: sudo -E env "PATH=$PATH" make test-integration test-coverage
README.md

@@ -6,6 +6,8 @@
[](https://godoc.org/github.com/mudler/luet)
[](https://codecov.io/gh/mudler/luet)

[](https://asciinema.org/a/388348)

Luet is a multi-platform Package Manager based off from containers - it uses Docker (and others) to build packages. It has zero dependencies and it is well suitable for "from scratch" environments. It can also version entire rootfs and enables delivery of OTA-alike updates, making it a perfect fit for the Edge computing era and IoT embedded devices.

It offers a simple [specfile format](https://luet-lab.github.io/docs/docs/concepts/packages/specfile/) in YAML notation to define both [packages](https://luet-lab.github.io/docs/docs/concepts/packages/) and [rootfs](https://luet-lab.github.io/docs/docs/concepts/packages/#package-layers). As it is based on containers, it can be also used to build stages for Linux From Scratch installations and it can build and track updates for those systems.
140  cmd/build.go

@@ -16,14 +16,18 @@ package cmd

import (
"fmt"
"io/ioutil"
"os"
"path/filepath"

"github.com/ghodss/yaml"
helpers "github.com/mudler/luet/cmd/helpers"
"github.com/mudler/luet/pkg/compiler"
"github.com/mudler/luet/pkg/compiler/backend"
"github.com/mudler/luet/pkg/compiler/types/artifact"
compilerspec "github.com/mudler/luet/pkg/compiler/types/spec"
"github.com/mudler/luet/pkg/installer"

"github.com/mudler/luet/pkg/compiler/types/compression"
"github.com/mudler/luet/pkg/compiler/types/options"
. "github.com/mudler/luet/pkg/config"
. "github.com/mudler/luet/pkg/logger"
pkg "github.com/mudler/luet/pkg/package"

@@ -40,17 +44,17 @@ var buildCmd = &cobra.Command{
Long: `Builds one or more packages from a tree (current directory is implied):

$ luet build utils/busybox utils/yq ...

Builds all packages

$ luet build --all

Builds only the leaf packages:

$ luet build --full

Build package revdeps:

$ luet build --revdeps utils/yq

Build package without dependencies (needs the images already in the host, or either need to be available online):

@@ -65,13 +69,13 @@ Build packages specifying multiple definition trees:
viper.BindPFlag("destination", cmd.Flags().Lookup("destination"))
viper.BindPFlag("backend", cmd.Flags().Lookup("backend"))
viper.BindPFlag("privileged", cmd.Flags().Lookup("privileged"))
viper.BindPFlag("database", cmd.Flags().Lookup("database"))
viper.BindPFlag("revdeps", cmd.Flags().Lookup("revdeps"))
viper.BindPFlag("all", cmd.Flags().Lookup("all"))
viper.BindPFlag("compression", cmd.Flags().Lookup("compression"))
viper.BindPFlag("nodeps", cmd.Flags().Lookup("nodeps"))
viper.BindPFlag("onlydeps", cmd.Flags().Lookup("onlydeps"))
viper.BindPFlag("values", cmd.Flags().Lookup("values"))
viper.BindPFlag("backend-args", cmd.Flags().Lookup("backend-args"))

viper.BindPFlag("image-repository", cmd.Flags().Lookup("image-repository"))
viper.BindPFlag("push", cmd.Flags().Lookup("push"))

@@ -79,12 +83,13 @@ Build packages specifying multiple definition trees:
viper.BindPFlag("wait", cmd.Flags().Lookup("wait"))
viper.BindPFlag("keep-images", cmd.Flags().Lookup("keep-images"))

LuetCfg.Viper.BindPFlag("keep-exported-images", cmd.Flags().Lookup("keep-exported-images"))

LuetCfg.Viper.BindPFlag("solver.type", cmd.Flags().Lookup("solver-type"))
LuetCfg.Viper.BindPFlag("solver.discount", cmd.Flags().Lookup("solver-discount"))
LuetCfg.Viper.BindPFlag("solver.rate", cmd.Flags().Lookup("solver-rate"))
LuetCfg.Viper.BindPFlag("solver.max_attempts", cmd.Flags().Lookup("solver-attempts"))
LuetCfg.Viper.BindPFlag("general.show_build_output", cmd.Flags().Lookup("live-output"))
LuetCfg.Viper.BindPFlag("backend-args", cmd.Flags().Lookup("backend-args"))

},
Run: func(cmd *cobra.Command, args []string) {

@@ -95,55 +100,48 @@ Build packages specifying multiple definition trees:
privileged := viper.GetBool("privileged")
revdeps := viper.GetBool("revdeps")
all := viper.GetBool("all")
databaseType := viper.GetString("database")
compressionType := viper.GetString("compression")
imageRepository := viper.GetString("image-repository")
values := viper.GetString("values")
values := viper.GetStringSlice("values")
wait := viper.GetBool("wait")
push := viper.GetBool("push")
pull := viper.GetBool("pull")
keepImages := viper.GetBool("keep-images")
nodeps := viper.GetBool("nodeps")
onlydeps := viper.GetBool("onlydeps")
keepExportedImages := viper.GetBool("keep-exported-images")
onlyTarget, _ := cmd.Flags().GetBool("only-target-package")
full, _ := cmd.Flags().GetBool("full")
concurrent, _ := cmd.Flags().GetBool("solver-concurrent")
var results Results
backendArgs := viper.GetStringSlice("backend-args")

out, _ := cmd.Flags().GetString("output")
if out != "terminal" {
LuetCfg.GetLogging().SetLogLevel("error")
}
pretend, _ := cmd.Flags().GetBool("pretend")
compilerSpecs := compiler.NewLuetCompilationspecs()
fromRepo, _ := cmd.Flags().GetBool("from-repositories")

compilerSpecs := compilerspec.NewLuetCompilationspecs()
var db pkg.PackageDatabase

compilerBackend := backend.NewBackend(backendType)
compilerBackend, err := compiler.NewBackend(backendType)
helpers.CheckErr(err)

switch databaseType {
case "memory":
db = pkg.NewInMemoryDatabase(false)

case "boltdb":
tmpdir, err := ioutil.TempDir("", "package")
if err != nil {
Fatal(err)
}
db = pkg.NewBoltDatabase(tmpdir)

}
db = pkg.NewInMemoryDatabase(false)
defer db.Clean()

generalRecipe := tree.NewCompilerRecipe(db)

if fromRepo {
if err := installer.LoadBuildTree(generalRecipe, db, LuetCfg); err != nil {
Warning("errors while loading trees from repositories", err.Error())
}
}

for _, src := range treePaths {
Info("Loading tree", src)

err := generalRecipe.Load(src)
if err != nil {
Fatal("Error: " + err.Error())
}
helpers.CheckErr(generalRecipe.Load(src))
}

Info("Building in", dst)

@@ -152,36 +150,43 @@ Build packages specifying multiple definition trees:
discount := LuetCfg.Viper.GetFloat64("solver.discount")
rate := LuetCfg.Viper.GetFloat64("solver.rate")
attempts := LuetCfg.Viper.GetInt("solver.max_attempts")
pullRepo, _ := cmd.Flags().GetStringArray("pull-repository")

LuetCfg.GetSolverOptions().Type = stype
LuetCfg.GetSolverOptions().LearnRate = float32(rate)
LuetCfg.GetSolverOptions().Discount = float32(discount)
LuetCfg.GetSolverOptions().MaxAttempts = attempts
LuetCfg.GetGeneral().ShowBuildOutput = LuetCfg.Viper.GetBool("general.show_build_output")

Debug("Solver", LuetCfg.GetSolverOptions().CompactString())

opts := compiler.NewDefaultCompilerOptions()
opts.SolverOptions = *LuetCfg.GetSolverOptions()
opts.ImageRepository = imageRepository
opts.PullFirst = pull
opts.KeepImg = keepImages
opts.Push = push
opts.OnlyDeps = onlydeps
opts.NoDeps = nodeps
opts.Wait = wait
opts.KeepImageExport = keepExportedImages
opts.PackageTargetOnly = onlyTarget
opts.BuildValuesFile = values
var solverOpts solver.Options
if concurrent {
solverOpts = solver.Options{Type: solver.ParallelSimple, Concurrency: concurrency}
} else {
solverOpts = solver.Options{Type: solver.SingleCoreSimple, Concurrency: concurrency}
opts := &LuetSolverOptions{
Type: stype,
LearnRate: float32(rate),
Discount: float32(discount),
MaxAttempts: attempts,
}

luetCompiler := compiler.NewLuetCompiler(compilerBackend, generalRecipe.GetDatabase(), opts, solverOpts)
luetCompiler.SetConcurrency(concurrency)
luetCompiler.SetCompressionType(compiler.CompressionImplementation(compressionType))
Debug("Solver", opts.CompactString())

if concurrent {
opts.Options = solver.Options{Type: solver.ParallelSimple, Concurrency: concurrency}
} else {
opts.Options = solver.Options{Type: solver.SingleCoreSimple, Concurrency: concurrency}
}

luetCompiler := compiler.NewLuetCompiler(compilerBackend, generalRecipe.GetDatabase(),
options.NoDeps(nodeps),
options.WithBackendType(backendType),
options.PushImages(push),
options.WithBuildValues(values),
options.WithPullRepositories(pullRepo),
options.WithPushRepository(imageRepository),
options.WithSolverOptions(*opts),
options.Wait(wait),
options.OnlyTarget(onlyTarget),
options.PullFirst(pull),
options.KeepImg(keepImages),
options.OnlyDeps(onlydeps),
options.BackendArgs(backendArgs),
options.Concurrency(concurrency),
options.WithCompressionType(compression.Implementation(compressionType)),
)

if full {
specs, err := luetCompiler.FromDatabase(generalRecipe.GetDatabase(), true, dst)
if err != nil {

@@ -194,7 +199,6 @@ Build packages specifying multiple definition trees:
}
} else if !all {
for _, a := range args {

pack, err := helpers.ParsePackageStr(a)
if err != nil {
Fatal("Invalid package string ", a, ": ", err.Error())

@@ -222,13 +226,13 @@ Build packages specifying multiple definition trees:
}
}

var artifact []compiler.Artifact
var artifact []*artifact.PackageArtifact
var errs []error
if revdeps {
artifact, errs = luetCompiler.CompileWithReverseDeps(privileged, compilerSpecs)

} else if pretend {
toCalculate := []compiler.CompilationSpec{}
toCalculate := []*compilerspec.LuetCompilationSpec{}
if full {
var err error
toCalculate, err = luetCompiler.ComputeMinimumCompilableSet(compilerSpecs.All()...)

@@ -278,6 +282,7 @@ Build packages specifying multiple definition trees:
}
}
} else {

artifact, errs = luetCompiler.CompileParallel(privileged, compilerSpecs)
}
if len(errs) != 0 {

@@ -287,7 +292,7 @@ Build packages specifying multiple definition trees:
Fatal("Bailing out")
}
for _, a := range artifact {
Info("Artifact generated:", a.GetPath())
Info("Artifact generated:", a.Path)
}
},
}

@@ -297,14 +302,15 @@ func init() {
if err != nil {
Fatal(err)
}

buildCmd.Flags().StringSliceP("tree", "t", []string{path}, "Path of the tree to use.")
buildCmd.Flags().String("backend", "docker", "backend used (docker,img)")
buildCmd.Flags().Bool("privileged", false, "Privileged (Keep permissions)")
buildCmd.Flags().String("database", "memory", "database used for solving (memory,boltdb)")
buildCmd.Flags().Bool("privileged", true, "Privileged (Keep permissions)")
buildCmd.Flags().Bool("revdeps", false, "Build with revdeps")
buildCmd.Flags().Bool("all", false, "Build all specfiles in the tree")
buildCmd.Flags().Bool("full", false, "Build all packages (optimized)")
buildCmd.Flags().String("values", "", "Build values file to interpolate with each package")
buildCmd.Flags().StringSlice("values", []string{}, "Build values file to interpolate with each package")
buildCmd.Flags().StringSliceP("backend-args", "a", []string{}, "Backend args")

buildCmd.Flags().String("destination", filepath.Join(path, "build"), "Destination folder")
buildCmd.Flags().String("compression", "none", "Compression alg: none, gzip, zstd")

@@ -315,15 +321,17 @@ func init() {
buildCmd.Flags().Bool("keep-images", true, "Keep built docker images in the host")
buildCmd.Flags().Bool("nodeps", false, "Build only the target packages, skipping deps (it works only if you already built the deps locally, or by using --pull) ")
buildCmd.Flags().Bool("onlydeps", false, "Build only package dependencies")
buildCmd.Flags().Bool("keep-exported-images", false, "Keep exported images used during building")
buildCmd.Flags().Bool("only-target-package", false, "Build packages of only the required target. Otherwise builds all the necessary ones not present in the destination")
buildCmd.Flags().String("solver-type", "", "Solver strategy")
buildCmd.Flags().Float32("solver-rate", 0.7, "Solver learning rate")
buildCmd.Flags().Float32("solver-discount", 1.0, "Solver discount rate")
buildCmd.Flags().Int("solver-attempts", 9000, "Solver maximum attempts")
buildCmd.Flags().Bool("solver-concurrent", false, "Use concurrent solver (experimental)")
buildCmd.Flags().Bool("live-output", LuetCfg.GetGeneral().ShowBuildOutput, "Enable live output of the build phase.")
buildCmd.Flags().Bool("from-repositories", false, "Consume the user-defined repositories to pull specfiles from")

buildCmd.Flags().Bool("pretend", false, "Just print what packages will be compiled")
buildCmd.Flags().StringArrayP("pull-repository", "p", []string{}, "A list of repositories to pull the cache from")

buildCmd.Flags().StringP("output", "o", "terminal", "Output format ( Defaults: terminal, available: json,yaml )")
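The rewritten compiler.NewLuetCompiler call in the hunks above drops the old mutable CompilerOptions struct in favour of functional options (options.NoDeps, options.Concurrency, options.PullFirst, and so on). A minimal sketch of how that pattern is typically wired up in Go; only the constructor names come from the diff, the option type, field names, and bodies below are illustrative assumptions, not the real pkg/compiler/types/options code:

package options

// Compiler collects the knobs that build-time options mutate.
// Field names here are stand-ins, not the upstream ones.
type Compiler struct {
	NoDepsEnabled bool
	Workers       int
	PullFirst     bool
}

// Option is a function that tweaks a Compiler configuration.
type Option func(*Compiler)

// NoDeps toggles dependency-free builds (hypothetical body).
func NoDeps(b bool) Option {
	return func(c *Compiler) { c.NoDepsEnabled = b }
}

// Concurrency sets the number of parallel workers (hypothetical body).
func Concurrency(i int) Option {
	return func(c *Compiler) { c.Workers = i }
}

// PullFirst enables pulling cached images before building (hypothetical body).
func PullFirst(b bool) Option {
	return func(c *Compiler) { c.PullFirst = b }
}

// Apply folds a list of options into a configuration, the way a constructor
// such as NewLuetCompiler would consume its variadic option arguments.
func (c *Compiler) Apply(opts ...Option) {
	for _, o := range opts {
		o(c)
	}
}

The appeal of this style, visible at the call site above, is that new build knobs can be added without changing the constructor signature or forcing callers to fill a large struct.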
@@ -20,6 +20,7 @@ import (
"os"
"path/filepath"

. "github.com/mudler/luet/pkg/config"
config "github.com/mudler/luet/pkg/config"
"github.com/mudler/luet/pkg/helpers"
. "github.com/mudler/luet/pkg/logger"

@@ -31,13 +32,24 @@ var cleanupCmd = &cobra.Command{
Use: "cleanup",
Short: "Clean packages cache.",
Long: `remove downloaded packages tarballs and clean cache directory`,
PreRun: func(cmd *cobra.Command, args []string) {
LuetCfg.Viper.BindPFlag("system.database_path", cmd.Flags().Lookup("system-dbpath"))
LuetCfg.Viper.BindPFlag("system.rootfs", cmd.Flags().Lookup("system-target"))
LuetCfg.Viper.BindPFlag("installed", cmd.Flags().Lookup("installed"))
},
Run: func(cmd *cobra.Command, args []string) {
var cleaned int = 0
dbpath := LuetCfg.Viper.GetString("system.database_path")
rootfs := config.LuetCfg.Viper.GetString("system.rootfs")
engine := config.LuetCfg.Viper.GetString("system.database_engine")

LuetCfg.System.DatabaseEngine = engine
LuetCfg.System.DatabasePath = dbpath
LuetCfg.System.Rootfs = rootfs
// Check if cache dir exists
if helpers.Exists(config.LuetCfg.GetSystem().GetSystemPkgsCacheDirPath()) {
if helpers.Exists(LuetCfg.GetSystem().GetSystemPkgsCacheDirPath()) {

files, err := ioutil.ReadDir(config.LuetCfg.GetSystem().GetSystemPkgsCacheDirPath())
files, err := ioutil.ReadDir(LuetCfg.GetSystem().GetSystemPkgsCacheDirPath())
if err != nil {
Fatal("Error on read cachedir ", err.Error())
}

@@ -47,12 +59,12 @@ var cleanupCmd = &cobra.Command{
continue
}

if config.LuetCfg.GetGeneral().Debug {
if LuetCfg.GetGeneral().Debug {
Info("Removing ", file.Name())
}

err := os.RemoveAll(
filepath.Join(config.LuetCfg.GetSystem().GetSystemPkgsCacheDirPath(), file.Name()))
filepath.Join(LuetCfg.GetSystem().GetSystemPkgsCacheDirPath(), file.Name()))
if err != nil {
Fatal("Error on removing", file.Name())
}

@@ -66,5 +78,8 @@ var cleanupCmd = &cobra.Command{
}

func init() {
cleanupCmd.Flags().String("system-dbpath", "", "System db path")
cleanupCmd.Flags().String("system-target", "", "System rootpath")
cleanupCmd.Flags().String("system-engine", "", "System DB engine")
RootCmd.AddCommand(cleanupCmd)
}
@@ -18,11 +18,13 @@ import (
"os"
"path/filepath"

helpers "github.com/mudler/luet/cmd/helpers"
"github.com/mudler/luet/pkg/compiler"
"github.com/mudler/luet/pkg/compiler/backend"
"github.com/mudler/luet/pkg/compiler/types/compression"
. "github.com/mudler/luet/pkg/config"
installer "github.com/mudler/luet/pkg/installer"
. "github.com/mudler/luet/pkg/logger"

// . "github.com/mudler/luet/pkg/logger"
pkg "github.com/mudler/luet/pkg/package"

"github.com/spf13/cobra"

@@ -74,7 +76,7 @@ Create a repository from the metadata description defined in the luet.yaml confi
},
Run: func(cmd *cobra.Command, args []string) {
var err error
var repo installer.Repository
var repo *installer.LuetSystemRepository

treePaths := viper.GetStringSlice("tree")
dst := viper.GetString("output")

@@ -90,19 +92,19 @@ Create a repository from the metadata description defined in the luet.yaml confi
metaName := viper.GetString("meta-filename")
source_repo := viper.GetString("repo")
backendType := viper.GetString("backend")
fromRepo, _ := cmd.Flags().GetBool("from-repositories")

treeFile := installer.NewDefaultTreeRepositoryFile()
metaFile := installer.NewDefaultMetaRepositoryFile()
compilerBackend := backend.NewBackend(backendType)
compilerBackend, err := compiler.NewBackend(backendType)
helpers.CheckErr(err)
force := viper.GetBool("force-push")
imagePush := viper.GetBool("push-images")

if source_repo != "" {
// Search for system repository
lrepo, err := LuetCfg.GetSystemRepository(source_repo)
if err != nil {
Fatal("Error: " + err.Error())
}
helpers.CheckErr(err)

if len(treePaths) <= 0 {
treePaths = []string{lrepo.TreePath}

@@ -118,19 +120,23 @@ Create a repository from the metadata description defined in the luet.yaml confi
lrepo.Priority,
packages,
treePaths,
pkg.NewInMemoryDatabase(false), compilerBackend, dst, imagePush, force)
pkg.NewInMemoryDatabase(false),
compilerBackend,
dst,
imagePush,
force,
fromRepo,
LuetCfg)
helpers.CheckErr(err)

} else {
repo, err = installer.GenerateRepository(name, descr, t, urls, 1, packages,
treePaths, pkg.NewInMemoryDatabase(false), compilerBackend, dst, imagePush, force)
}

if err != nil {
Fatal("Error: " + err.Error())
treePaths, pkg.NewInMemoryDatabase(false), compilerBackend, dst, imagePush, force, fromRepo, LuetCfg)
helpers.CheckErr(err)
}

if treetype != "" {
treeFile.SetCompressionType(compiler.CompressionImplementation(treetype))
treeFile.SetCompressionType(compression.Implementation(treetype))
}

if treeName != "" {

@@ -138,7 +144,7 @@ Create a repository from the metadata description defined in the luet.yaml confi
}

if metatype != "" {
metaFile.SetCompressionType(compiler.CompressionImplementation(metatype))
metaFile.SetCompressionType(compression.Implementation(metatype))
}

if metaName != "" {

@@ -149,17 +155,15 @@ Create a repository from the metadata description defined in the luet.yaml confi
repo.SetRepositoryFile(installer.REPOFILE_META_KEY, metaFile)

err = repo.Write(dst, reset, true)
if err != nil {
Fatal("Error: " + err.Error())
}
helpers.CheckErr(err)

},
}

func init() {
path, err := os.Getwd()
if err != nil {
Fatal(err)
}
helpers.CheckErr(err)

createrepoCmd.Flags().String("packages", filepath.Join(path, "build"), "Packages folder (output from build)")
createrepoCmd.Flags().StringSliceP("tree", "t", []string{path}, "Path of the source trees to use.")
createrepoCmd.Flags().String("output", filepath.Join(path, "build"), "Destination for generated archives. With 'docker' repository type, it should be an image reference (e.g 'foo/bar')")

@@ -178,6 +182,7 @@ func init() {
createrepoCmd.Flags().String("tree-filename", installer.TREE_TARBALL, "Repository tree filename")
createrepoCmd.Flags().String("meta-compression", "none", "Compression alg: none, gzip, zstd")
createrepoCmd.Flags().String("meta-filename", installer.REPOSITORY_METAFILE+".tar", "Repository metadata filename")
createrepoCmd.Flags().Bool("from-repositories", false, "Consume the user-defined repositories to pull specfiles from")

RootCmd.AddCommand(createrepoCmd)
}
@@ -36,6 +36,7 @@ func init() {

databaseGroupCmd.AddCommand(
NewDatabaseCreateCommand(),
NewDatabaseGetCommand(),
NewDatabaseRemoveCommand(),
)
}
@@ -18,7 +18,8 @@ package cmd_database
import (
"io/ioutil"

"github.com/mudler/luet/pkg/compiler"
artifact "github.com/mudler/luet/pkg/compiler/types/artifact"

. "github.com/mudler/luet/pkg/logger"
pkg "github.com/mudler/luet/pkg/package"

@@ -46,10 +47,19 @@ For reference, inspect a "metadata.yaml" file generated while running "luet buil
PreRun: func(cmd *cobra.Command, args []string) {
LuetCfg.Viper.BindPFlag("system.database_path", cmd.Flags().Lookup("system-dbpath"))
LuetCfg.Viper.BindPFlag("system.rootfs", cmd.Flags().Lookup("system-target"))
LuetCfg.Viper.BindPFlag("system.database_engine", cmd.Flags().Lookup("system-engine"))

},
Run: func(cmd *cobra.Command, args []string) {

dbpath := LuetCfg.Viper.GetString("system.database_path")
rootfs := LuetCfg.Viper.GetString("system.rootfs")
engine := LuetCfg.Viper.GetString("system.database_engine")

LuetCfg.System.DatabaseEngine = engine
LuetCfg.System.DatabasePath = dbpath
LuetCfg.System.Rootfs = rootfs

systemDB := LuetCfg.GetSystemDB()

for _, a := range args {

@@ -57,25 +67,29 @@ For reference, inspect a "metadata.yaml" file generated while running "luet buil
if err != nil {
Fatal("Failed reading ", a, ": ", err.Error())
}
art, err := compiler.NewPackageArtifactFromYaml(dat)
art, err := artifact.NewPackageArtifactFromYaml(dat)
if err != nil {
Fatal("Failed reading yaml ", a, ": ", err.Error())
}

files := art.GetFiles()
files := art.Files

if _, err := systemDB.CreatePackage(art.GetCompileSpec().GetPackage()); err != nil {
if _, err := systemDB.CreatePackage(art.CompileSpec.GetPackage()); err != nil {
Fatal("Failed to create ", a, ": ", err.Error())
}
if err := systemDB.SetPackageFiles(&pkg.PackageFile{PackageFingerprint: art.GetCompileSpec().GetPackage().GetFingerPrint(), Files: files}); err != nil {
if err := systemDB.SetPackageFiles(&pkg.PackageFile{PackageFingerprint: art.CompileSpec.GetPackage().GetFingerPrint(), Files: files}); err != nil {
Fatal("Failed setting package files for ", a, ": ", err.Error())
}

Info(art.GetCompileSpec().GetPackage().HumanReadableString(), " created")
Info(art.CompileSpec.GetPackage().HumanReadableString(), " created")
}

},
}

ans.Flags().String("system-dbpath", "", "System db path")
ans.Flags().String("system-target", "", "System rootpath")
ans.Flags().String("system-engine", "", "System DB engine")

return ans
}
95  cmd/database/get.go  (Normal file)

@@ -0,0 +1,95 @@
// Copyright © 2021 Ettore Di Giacinto <mudler@mocaccino.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.

package cmd_database

import (
"fmt"

helpers "github.com/mudler/luet/cmd/helpers"
"gopkg.in/yaml.v2"

. "github.com/mudler/luet/pkg/config"

"github.com/spf13/cobra"
)

func NewDatabaseGetCommand() *cobra.Command {
var c = &cobra.Command{
Use: "get <package>",
Short: "Get a package in the system DB as yaml",
Long: `Get a package in the system database in the YAML format:

$ luet database get system/foo

To return also files:
$ luet database get --files system/foo`,
Args: cobra.OnlyValidArgs,
PreRun: func(cmd *cobra.Command, args []string) {
LuetCfg.Viper.BindPFlag("system.database_path", cmd.Flags().Lookup("system-dbpath"))
LuetCfg.Viper.BindPFlag("system.rootfs", cmd.Flags().Lookup("system-target"))
LuetCfg.Viper.BindPFlag("system.database_engine", cmd.Flags().Lookup("system-engine"))

},
Run: func(cmd *cobra.Command, args []string) {
showFiles, _ := cmd.Flags().GetBool("files")
dbpath := LuetCfg.Viper.GetString("system.database_path")
rootfs := LuetCfg.Viper.GetString("system.rootfs")
engine := LuetCfg.Viper.GetString("system.database_engine")

LuetCfg.System.DatabaseEngine = engine
LuetCfg.System.DatabasePath = dbpath
LuetCfg.System.Rootfs = rootfs

systemDB := LuetCfg.GetSystemDB()

for _, a := range args {
pack, err := helpers.ParsePackageStr(a)
if err != nil {
continue
}

ps, err := systemDB.FindPackages(pack)
if err != nil {
continue
}
for _, p := range ps {
y, err := p.Yaml()
if err != nil {
continue
}
fmt.Println(string(y))
if showFiles {
files, err := systemDB.GetPackageFiles(p)
if err != nil {
continue
}
b, err := yaml.Marshal(files)
if err != nil {
continue
}
fmt.Println("files:\n" + string(b))
}
}
}
},
}
c.Flags().Bool("files", false, "Show package files.")
c.Flags().String("system-dbpath", "", "System db path")
c.Flags().String("system-target", "", "System rootpath")
c.Flags().String("system-engine", "", "System DB engine")

return c
}
@@ -38,9 +38,18 @@ This commands takes multiple packages as arguments and prunes their entries from
PreRun: func(cmd *cobra.Command, args []string) {
LuetCfg.Viper.BindPFlag("system.database_path", cmd.Flags().Lookup("system-dbpath"))
LuetCfg.Viper.BindPFlag("system.rootfs", cmd.Flags().Lookup("system-target"))
LuetCfg.Viper.BindPFlag("system.database_engine", cmd.Flags().Lookup("system-engine"))

},
Run: func(cmd *cobra.Command, args []string) {
dbpath := LuetCfg.Viper.GetString("system.database_path")
rootfs := LuetCfg.Viper.GetString("system.rootfs")
engine := LuetCfg.Viper.GetString("system.database_engine")

LuetCfg.System.DatabaseEngine = engine
LuetCfg.System.DatabasePath = dbpath
LuetCfg.System.Rootfs = rootfs

systemDB := LuetCfg.GetSystemDB()

for _, a := range args {

@@ -60,6 +69,9 @@ This commands takes multiple packages as arguments and prunes their entries from

},
}
ans.Flags().String("system-dbpath", "", "System db path")
ans.Flags().String("system-target", "", "System rootpath")
ans.Flags().String("system-engine", "", "System DB engine")

return ans
}
@@ -22,6 +22,8 @@ import (
"regexp"
"strings"

. "github.com/mudler/luet/pkg/logger"

_gentoo "github.com/Sabayon/pkgs-checker/pkg/gentoo"
pkg "github.com/mudler/luet/pkg/package"
version "github.com/mudler/luet/pkg/versioner"

@@ -56,7 +58,8 @@ func packageData(p string) (string, string) {
}
func ParsePackageStr(p string) (*pkg.DefaultPackage, error) {

if !strings.HasPrefix(p, "=") {
if !(strings.HasPrefix(p, "=") || strings.HasPrefix(p, ">") ||
strings.HasPrefix(p, "<")) {
ver := ">=0"
cat := ""
name := ""

@@ -111,3 +114,9 @@ func ParsePackageStr(p string) (*pkg.DefaultPackage, error) {

return pack, nil
}

func CheckErr(err error) {
if err != nil {
Fatal(err)
}
}
@@ -66,5 +66,37 @@ var _ = Describe("CLI Helpers", func() {
Expect(pack.GetCategory()).To(Equal("cat"))
Expect(pack.GetVersion()).To(Equal("1.2"))
})

It("accept gentoo regex parsing with with condition", func() {
pack, err := ParsePackageStr(">=cat/foo-1.2")
Expect(err).ToNot(HaveOccurred())
Expect(pack.GetName()).To(Equal("foo"))
Expect(pack.GetCategory()).To(Equal("cat"))
Expect(pack.GetVersion()).To(Equal(">=1.2"))
})

It("accept gentoo regex parsing with with condition2", func() {
pack, err := ParsePackageStr("<cat/foo-1.2")
Expect(err).ToNot(HaveOccurred())
Expect(pack.GetName()).To(Equal("foo"))
Expect(pack.GetCategory()).To(Equal("cat"))
Expect(pack.GetVersion()).To(Equal("<1.2"))
})

It("accept gentoo regex parsing with with condition3", func() {
pack, err := ParsePackageStr(">cat/foo-1.2")
Expect(err).ToNot(HaveOccurred())
Expect(pack.GetName()).To(Equal("foo"))
Expect(pack.GetCategory()).To(Equal("cat"))
Expect(pack.GetVersion()).To(Equal(">1.2"))
})

It("accept gentoo regex parsing with with condition4", func() {
pack, err := ParsePackageStr("<=cat/foo-1.2")
Expect(err).ToNot(HaveOccurred())
Expect(pack.GetName()).To(Equal("foo"))
Expect(pack.GetCategory()).To(Equal("cat"))
Expect(pack.GetVersion()).To(Equal("<=1.2"))
})
})
})
@@ -15,8 +15,6 @@
package cmd

import (
"os"

installer "github.com/mudler/luet/pkg/installer"
"github.com/mudler/luet/pkg/solver"

@@ -51,6 +49,7 @@ To force install a package:
PreRun: func(cmd *cobra.Command, args []string) {
LuetCfg.Viper.BindPFlag("system.database_path", cmd.Flags().Lookup("system-dbpath"))
LuetCfg.Viper.BindPFlag("system.rootfs", cmd.Flags().Lookup("system-target"))
LuetCfg.Viper.BindPFlag("system.database_engine", cmd.Flags().Lookup("system-engine"))
LuetCfg.Viper.BindPFlag("solver.type", cmd.Flags().Lookup("solver-type"))
LuetCfg.Viper.BindPFlag("solver.discount", cmd.Flags().Lookup("solver-discount"))
LuetCfg.Viper.BindPFlag("solver.rate", cmd.Flags().Lookup("solver-rate"))

@@ -71,16 +70,6 @@ To force install a package:
toInstall = append(toInstall, pack)
}

// This shouldn't be necessary, but we need to unmarshal the repositories to a concrete struct, thus we need to port them back to the Repositories type
repos := installer.Repositories{}
for _, repo := range LuetCfg.SystemRepositories {
if !repo.Enable {
continue
}
r := installer.NewSystemRepository(repo)
repos = append(repos, r)
}

stype := LuetCfg.Viper.GetString("solver.type")
discount := LuetCfg.Viper.GetFloat64("solver.discount")
rate := LuetCfg.Viper.GetFloat64("solver.rate")

@@ -90,6 +79,15 @@ To force install a package:
onlydeps := LuetCfg.Viper.GetBool("onlydeps")
concurrent, _ := cmd.Flags().GetBool("solver-concurrent")
yes := LuetCfg.Viper.GetBool("yes")
downloadOnly, _ := cmd.Flags().GetBool("download-only")

dbpath := LuetCfg.Viper.GetString("system.database_path")
rootfs := LuetCfg.Viper.GetString("system.rootfs")
engine := LuetCfg.Viper.GetString("system.database_engine")

LuetCfg.System.DatabaseEngine = engine
LuetCfg.System.DatabasePath = dbpath
LuetCfg.System.Rootfs = rootfs

LuetCfg.GetSolverOptions().Type = stype
LuetCfg.GetSolverOptions().LearnRate = float32(rate)

@@ -103,6 +101,7 @@ To force install a package:
}

Debug("Solver", LuetCfg.GetSolverOptions().CompactString())
repos := installer.SystemRepositories(LuetCfg)

// Load config protect configs
installer.LoadConfigProtectConfs(LuetCfg)

@@ -114,6 +113,7 @@ To force install a package:
Force: force,
OnlyDeps: onlydeps,
PreserveSystemEssentialData: true,
DownloadOnly: downloadOnly,
Ask: !yes,
})
inst.Repositories(repos)

@@ -127,12 +127,10 @@ To force install a package:
}

func init() {
path, err := os.Getwd()
if err != nil {
Fatal(err)
}
installCmd.Flags().String("system-dbpath", path, "System db path")
installCmd.Flags().String("system-target", path, "System rootpath")
installCmd.Flags().String("system-dbpath", "", "System db path")
installCmd.Flags().String("system-target", "", "System rootpath")
installCmd.Flags().String("system-engine", "", "System DB engine")

installCmd.Flags().String("solver-type", "", "Solver strategy ( Defaults none, available: "+AvailableResolvers+" )")
installCmd.Flags().Float32("solver-rate", 0.7, "Solver learning rate")
installCmd.Flags().Float32("solver-discount", 1.0, "Solver discount rate")

@@ -142,6 +140,7 @@ func init() {
installCmd.Flags().Bool("force", false, "Skip errors and keep going (potentially harmful)")
installCmd.Flags().Bool("solver-concurrent", false, "Use concurrent solver (experimental)")
installCmd.Flags().BoolP("yes", "y", false, "Don't ask questions")
installCmd.Flags().Bool("download-only", false, "Download only")

RootCmd.AddCommand(installCmd)
}
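In the install.go hunks above, the hand-rolled loop that converted LuetCfg.SystemRepositories into installer.Repositories is replaced by a single installer.SystemRepositories(LuetCfg) call. A self-contained sketch of what such a helper plausibly does, reconstructed from the removed loop; the type names below are stand-ins and the real pkg/installer implementation may differ:

package main

import "fmt"

// Minimal stand-ins for the real config and installer types (names are assumptions).
type repoConfig struct {
	Name   string
	Enable bool
}

type Repository struct{ Name string }

func NewSystemRepository(r repoConfig) Repository { return Repository{Name: r.Name} }

// systemRepositories mirrors the removed loop: keep only enabled repositories
// and wrap each one with NewSystemRepository.
func systemRepositories(cfg []repoConfig) []Repository {
	out := []Repository{}
	for _, r := range cfg {
		if !r.Enable {
			continue // disabled repositories are skipped, as in the old inline loop
		}
		out = append(out, NewSystemRepository(r))
	}
	return out
}

func main() {
	cfg := []repoConfig{{Name: "main", Enable: true}, {Name: "extra", Enable: false}}
	fmt.Println(systemRepositories(cfg)) // only "main" survives
}

Centralising this loop removes the duplicated "unmarshal to a concrete struct" block that the install, search, and replace commands each carried before this change.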
22  cmd/pack.go

@@ -20,7 +20,9 @@ import (
"time"

helpers "github.com/mudler/luet/cmd/helpers"
"github.com/mudler/luet/pkg/compiler"
"github.com/mudler/luet/pkg/compiler/types/artifact"
"github.com/mudler/luet/pkg/compiler/types/compression"
compilerspec "github.com/mudler/luet/pkg/compiler/types/spec"
. "github.com/mudler/luet/pkg/config"
. "github.com/mudler/luet/pkg/logger"

@@ -64,21 +66,21 @@ Afterwards, you can use the content generated and associate it with a tree and a
Fatal("Invalid package string ", packageName, ": ", err.Error())
}

spec := &compiler.LuetCompilationSpec{Package: p}
artifact := compiler.NewPackageArtifact(filepath.Join(dst, p.GetFingerPrint()+".package.tar"))
artifact.SetCompressionType(compiler.CompressionImplementation(compressionType))
err = artifact.Compress(sourcePath, concurrency)
spec := &compilerspec.LuetCompilationSpec{Package: p}
a := artifact.NewPackageArtifact(filepath.Join(dst, p.GetFingerPrint()+".package.tar"))
a.CompressionType = compression.Implementation(compressionType)
err = a.Compress(sourcePath, concurrency)
if err != nil {
Fatal("failed compressing ", packageName, ": ", err.Error())
}
artifact.SetCompileSpec(spec)
filelist, err := artifact.FileList()
a.CompileSpec = spec
filelist, err := a.FileList()
if err != nil {
Fatal("failed generating file list for ", packageName, ": ", err.Error())
}
artifact.SetFiles(filelist)
artifact.GetCompileSpec().GetPackage().SetBuildTimestamp(time.Now().String())
err = artifact.WriteYaml(dst)
a.Files = filelist
a.CompileSpec.GetPackage().SetBuildTimestamp(time.Now().String())
err = a.WriteYaml(dst)
if err != nil {
Fatal("failed writing metadata yaml file for ", packageName, ": ", err.Error())
}
@@ -15,8 +15,6 @@
package cmd

import (
"os"

installer "github.com/mudler/luet/pkg/installer"

. "github.com/mudler/luet/pkg/config"

@@ -31,6 +29,7 @@ var reclaimCmd = &cobra.Command{
PreRun: func(cmd *cobra.Command, args []string) {
LuetCfg.Viper.BindPFlag("system.database_path", cmd.Flags().Lookup("system-dbpath"))
LuetCfg.Viper.BindPFlag("system.rootfs", cmd.Flags().Lookup("system-target"))
LuetCfg.Viper.BindPFlag("system.database_engine", cmd.Flags().Lookup("system-engine"))
LuetCfg.Viper.BindPFlag("force", cmd.Flags().Lookup("force"))
},
Long: `Reclaim tries to find association between packages in the online repositories and the system one.

@@ -40,6 +39,13 @@ var reclaimCmd = &cobra.Command{
It scans the target file system, and if finds a match with a package available in the repositories, it marks as installed in the system database.
`,
Run: func(cmd *cobra.Command, args []string) {
dbpath := LuetCfg.Viper.GetString("system.database_path")
rootfs := LuetCfg.Viper.GetString("system.rootfs")
engine := LuetCfg.Viper.GetString("system.database_engine")

LuetCfg.System.DatabaseEngine = engine
LuetCfg.System.DatabasePath = dbpath
LuetCfg.System.Rootfs = rootfs

// This shouldn't be necessary, but we need to unmarshal the repositories to a concrete struct, thus we need to port them back to the Repositories type
repos := installer.Repositories{}

@@ -71,12 +77,11 @@ It scans the target file system, and if finds a match with a package available i
}

func init() {
path, err := os.Getwd()
if err != nil {
Fatal(err)
}
reclaimCmd.Flags().String("system-dbpath", path, "System db path")
reclaimCmd.Flags().String("system-target", path, "System rootpath")

reclaimCmd.Flags().String("system-dbpath", "", "System db path")
reclaimCmd.Flags().String("system-target", "", "System rootpath")
reclaimCmd.Flags().String("system-engine", "", "System DB engine")

reclaimCmd.Flags().Bool("force", false, "Skip errors and keep going (potentially harmful)")

RootCmd.AddCommand(reclaimCmd)
@@ -15,8 +15,6 @@
package cmd

import (
"os"

installer "github.com/mudler/luet/pkg/installer"
"github.com/mudler/luet/pkg/solver"

@@ -38,6 +36,7 @@ var replaceCmd = &cobra.Command{
`,
PreRun: func(cmd *cobra.Command, args []string) {
LuetCfg.Viper.BindPFlag("system.database_path", cmd.Flags().Lookup("system-dbpath"))
LuetCfg.Viper.BindPFlag("system.database_engine", cmd.Flags().Lookup("system-engine"))
LuetCfg.Viper.BindPFlag("system.rootfs", cmd.Flags().Lookup("system-target"))
LuetCfg.Viper.BindPFlag("solver.type", cmd.Flags().Lookup("solver-type"))
LuetCfg.Viper.BindPFlag("solver.discount", cmd.Flags().Lookup("solver-discount"))

@@ -64,6 +63,14 @@ var replaceCmd = &cobra.Command{
onlydeps := LuetCfg.Viper.GetBool("onlydeps")
concurrent, _ := cmd.Flags().GetBool("solver-concurrent")
yes := LuetCfg.Viper.GetBool("yes")
dbpath := LuetCfg.Viper.GetString("system.database_path")
rootfs := LuetCfg.Viper.GetString("system.rootfs")
engine := LuetCfg.Viper.GetString("system.database_engine")
downloadOnly, _ := cmd.Flags().GetBool("download-only")

LuetCfg.System.DatabaseEngine = engine
LuetCfg.System.DatabasePath = dbpath
LuetCfg.System.Rootfs = rootfs

for _, a := range args {
pack, err := helpers.ParsePackageStr(a)

@@ -115,6 +122,7 @@ var replaceCmd = &cobra.Command{
OnlyDeps: onlydeps,
PreserveSystemEssentialData: true,
Ask: !yes,
DownloadOnly: downloadOnly,
})
inst.Repositories(repos)

@@ -127,12 +135,11 @@ var replaceCmd = &cobra.Command{
}

func init() {
path, err := os.Getwd()
if err != nil {
Fatal(err)
}
replaceCmd.Flags().String("system-dbpath", path, "System db path")
replaceCmd.Flags().String("system-target", path, "System rootpath")

replaceCmd.Flags().String("system-dbpath", "", "System db path")
replaceCmd.Flags().String("system-target", "", "System rootpath")
replaceCmd.Flags().String("system-engine", "", "System DB engine")

replaceCmd.Flags().String("solver-type", "", "Solver strategy ( Defaults none, available: "+AvailableResolvers+" )")
replaceCmd.Flags().Float32("solver-rate", 0.7, "Solver learning rate")
replaceCmd.Flags().Float32("solver-discount", 1.0, "Solver discount rate")

@@ -143,6 +150,7 @@ func init() {
replaceCmd.Flags().Bool("solver-concurrent", false, "Use concurrent solver (experimental)")
replaceCmd.Flags().BoolP("yes", "y", false, "Don't ask questions")
replaceCmd.Flags().StringSlice("for", []string{}, "Packages that has to be installed in place of others")
replaceCmd.Flags().Bool("download-only", false, "Download only")

RootCmd.AddCommand(replaceCmd)
}
@@ -72,8 +72,8 @@ func NewRepoListCommand() *cobra.Command {
if repo.Cached {

r := installer.NewSystemRepository(repo)
localRepo, _ := r.(*installer.LuetSystemRepository).ReadSpecFile(filepath.Join(repobasedir,
installer.REPOSITORY_SPECFILE), false)
localRepo, _ := r.ReadSpecFile(filepath.Join(repobasedir,
installer.REPOSITORY_SPECFILE))
if localRepo != nil {
tsec, _ := strconv.ParseInt(localRepo.GetLastUpdate(), 10, 64)
repoRevision = Bold(Red(localRepo.GetRevision())).String() +
@@ -40,7 +40,7 @@ var Verbose bool
var LockedCommands = []string{"install", "uninstall", "upgrade"}

const (
LuetCLIVersion = "0.10.0"
LuetCLIVersion = "0.13.1"
LuetEnvPrefix = "LUET"
)
361  cmd/search.go

@@ -16,7 +16,6 @@ package cmd

import (
"fmt"
"os"
"strings"

"github.com/ghodss/yaml"

@@ -30,12 +29,13 @@ import (
)

type PackageResult struct {
Name string `json:"name"`
Category string `json:"category"`
Version string `json:"version"`
Repository string `json:"repository"`
Target string `json:"target"`
Hidden bool `json:"hidden"`
Name string `json:"name"`
Category string `json:"category"`
Version string `json:"version"`
Repository string `json:"repository"`
Target string `json:"target"`
Hidden bool `json:"hidden"`
Files []string `json:"files"`
}

type Results struct {
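The hunk above adds a Files field to PackageResult, so the search command's non-terminal output can now carry the file list of each match. A minimal, self-contained sketch of what one serialized result would look like; the field names and JSON tags come from the struct above, while the example values and the main function are invented for illustration:

package main

import (
	"encoding/json"
	"fmt"
)

// PackageResult mirrors the struct from cmd/search.go, including the new Files field.
type PackageResult struct {
	Name       string   `json:"name"`
	Category   string   `json:"category"`
	Version    string   `json:"version"`
	Repository string   `json:"repository"`
	Target     string   `json:"target"`
	Hidden     bool     `json:"hidden"`
	Files      []string `json:"files"`
}

func main() {
	// Hypothetical values, only for showing the output shape.
	r := PackageResult{
		Name:       "busybox",
		Category:   "utils",
		Version:    "1.33.0",
		Repository: "system",
		Files:      []string{"/bin/busybox"},
	}
	b, _ := json.MarshalIndent(r, "", "  ")
	fmt.Println(string(b)) // prints the previous fields plus a "files" array
}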
@@ -64,6 +64,210 @@ func packageToList(l list.Writer, repo string, p pkg.Package) {
|
||||
l.UnIndent()
|
||||
}
|
||||
|
||||
func searchLocally(term string, l list.Writer, t table.Writer, label, labelMatch, revdeps, hidden bool) Results {
|
||||
var results Results
|
||||
|
||||
system := &installer.System{Database: LuetCfg.GetSystemDB(), Target: LuetCfg.GetSystem().Rootfs}
|
||||
|
||||
var err error
|
||||
iMatches := pkg.Packages{}
|
||||
if label {
|
||||
iMatches, err = system.Database.FindPackageLabel(term)
|
||||
} else if labelMatch {
|
||||
iMatches, err = system.Database.FindPackageLabelMatch(term)
|
||||
} else {
|
||||
iMatches, err = system.Database.FindPackageMatch(term)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
Fatal("Error: " + err.Error())
|
||||
}
|
||||
|
||||
for _, pack := range iMatches {
|
||||
if !revdeps {
|
||||
if !pack.IsHidden() || pack.IsHidden() && hidden {
|
||||
|
||||
t.AppendRow(packageToRow("system", pack))
|
||||
packageToList(l, "system", pack)
|
||||
f, _ := system.Database.GetPackageFiles(pack)
|
||||
results.Packages = append(results.Packages,
|
||||
PackageResult{
|
||||
Name: pack.GetName(),
|
||||
Version: pack.GetVersion(),
|
||||
Category: pack.GetCategory(),
|
||||
Repository: "system",
|
||||
Hidden: pack.IsHidden(),
|
||||
Files: f,
|
||||
})
|
||||
}
|
||||
} else {
|
||||
|
||||
packs, _ := system.Database.GetRevdeps(pack)
|
||||
for _, revdep := range packs {
|
||||
if !revdep.IsHidden() || revdep.IsHidden() && hidden {
|
||||
t.AppendRow(packageToRow("system", pack))
|
||||
packageToList(l, "system", pack)
|
||||
f, _ := system.Database.GetPackageFiles(revdep)
|
||||
results.Packages = append(results.Packages,
|
||||
PackageResult{
|
||||
Name: revdep.GetName(),
|
||||
Version: revdep.GetVersion(),
|
||||
Category: revdep.GetCategory(),
|
||||
Repository: "system",
|
||||
Hidden: revdep.IsHidden(),
|
||||
Files: f,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return results
|
||||
|
||||
}
|
||||
func searchOnline(term string, l list.Writer, t table.Writer, label, labelMatch, revdeps, hidden bool) Results {
|
||||
var results Results
|
||||
|
||||
repos := installer.Repositories{}
|
||||
for _, repo := range LuetCfg.SystemRepositories {
|
||||
if !repo.Enable {
|
||||
continue
|
||||
}
|
||||
r := installer.NewSystemRepository(repo)
|
||||
repos = append(repos, r)
|
||||
}
|
||||
|
||||
inst := installer.NewLuetInstaller(
|
||||
installer.LuetInstallerOptions{
|
||||
Concurrency: LuetCfg.GetGeneral().Concurrency,
|
||||
SolverOptions: *LuetCfg.GetSolverOptions(),
|
||||
},
|
||||
)
|
||||
inst.Repositories(repos)
|
||||
synced, err := inst.SyncRepositories(false)
|
||||
if err != nil {
|
||||
Fatal("Error: " + err.Error())
|
||||
}
|
||||
|
||||
Info("--- Search results (" + term + "): ---")
|
||||
|
||||
matches := []installer.PackageMatch{}
|
||||
if label {
|
||||
matches = synced.SearchLabel(term)
|
||||
} else if labelMatch {
|
||||
matches = synced.SearchLabelMatch(term)
|
||||
} else {
|
||||
matches = synced.Search(term)
|
||||
}
|
||||
|
||||
for _, m := range matches {
|
||||
if !revdeps {
|
||||
if !m.Package.IsHidden() || m.Package.IsHidden() && hidden {
|
||||
t.AppendRow(packageToRow(m.Repo.GetName(), m.Package))
|
||||
packageToList(l, m.Repo.GetName(), m.Package)
|
||||
r := &PackageResult{
|
||||
Name: m.Package.GetName(),
|
||||
Version: m.Package.GetVersion(),
|
||||
Category: m.Package.GetCategory(),
|
||||
Repository: m.Repo.GetName(),
|
||||
Hidden: m.Package.IsHidden(),
|
||||
}
|
||||
if m.Artifact != nil {
|
||||
r.Files = m.Artifact.Files
|
||||
}
|
||||
results.Packages = append(results.Packages, *r)
|
||||
}
|
||||
} else {
|
||||
packs, _ := m.Repo.GetTree().GetDatabase().GetRevdeps(m.Package)
|
||||
for _, revdep := range packs {
|
||||
if !revdep.IsHidden() || revdep.IsHidden() && hidden {
|
||||
t.AppendRow(packageToRow(m.Repo.GetName(), revdep))
|
||||
packageToList(l, m.Repo.GetName(), revdep)
|
||||
r := &PackageResult{
|
||||
Name: revdep.GetName(),
|
||||
Version: revdep.GetVersion(),
|
||||
Category: revdep.GetCategory(),
|
||||
Repository: m.Repo.GetName(),
|
||||
Hidden: revdep.IsHidden(),
|
||||
}
|
||||
if m.Artifact != nil {
|
||||
r.Files = m.Artifact.Files
|
||||
}
|
||||
results.Packages = append(results.Packages, *r)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return results
|
||||
}
|
||||
func searchLocalFiles(term string, l list.Writer, t table.Writer) Results {
|
||||
var results Results
|
||||
Info("--- Search results (" + term + "): ---")
|
||||
|
||||
matches, _ := LuetCfg.GetSystemDB().FindPackageByFile(term)
|
||||
for _, pack := range matches {
|
||||
t.AppendRow(packageToRow("system", pack))
|
||||
packageToList(l, "system", pack)
|
||||
f, _ := LuetCfg.GetSystemDB().GetPackageFiles(pack)
|
||||
results.Packages = append(results.Packages,
|
||||
PackageResult{
|
||||
Name: pack.GetName(),
|
||||
Version: pack.GetVersion(),
|
||||
Category: pack.GetCategory(),
|
||||
Repository: "system",
|
||||
Hidden: pack.IsHidden(),
|
||||
Files: f,
|
||||
})
|
||||
}
|
||||
|
||||
return results
|
||||
}
|
||||
|
||||
func searchFiles(term string, l list.Writer, t table.Writer) Results {
|
||||
var results Results
|
||||
|
||||
repos := installer.Repositories{}
|
||||
for _, repo := range LuetCfg.SystemRepositories {
|
||||
if !repo.Enable {
|
||||
continue
|
||||
}
|
||||
r := installer.NewSystemRepository(repo)
|
||||
repos = append(repos, r)
|
||||
}
|
||||
|
||||
inst := installer.NewLuetInstaller(
|
||||
installer.LuetInstallerOptions{
|
||||
Concurrency: LuetCfg.GetGeneral().Concurrency,
|
||||
SolverOptions: *LuetCfg.GetSolverOptions(),
|
||||
},
|
||||
)
|
||||
inst.Repositories(repos)
|
||||
synced, err := inst.SyncRepositories(false)
|
||||
if err != nil {
|
||||
Fatal("Error: " + err.Error())
|
||||
}
|
||||
|
||||
Info("--- Search results (" + term + "): ---")
|
||||
|
||||
matches := []installer.PackageMatch{}
|
||||
|
||||
matches = synced.SearchPackages(term, installer.FileSearch)
|
||||
|
||||
for _, m := range matches {
|
||||
t.AppendRow(packageToRow(m.Repo.GetName(), m.Package))
|
||||
packageToList(l, m.Repo.GetName(), m.Package)
|
||||
results.Packages = append(results.Packages,
|
||||
PackageResult{
|
||||
Name: m.Package.GetName(),
|
||||
Version: m.Package.GetVersion(),
|
||||
Category: m.Package.GetCategory(),
|
||||
Repository: m.Repo.GetName(),
|
||||
Hidden: m.Package.IsHidden(),
|
||||
Files: m.Artifact.Files,
|
||||
})
|
||||
}
|
||||
return results
|
||||
}
|
||||
|
||||
var searchCmd = &cobra.Command{
Use: "search <term>",
Short: "Search packages",
@@ -107,6 +311,7 @@ Search can also return results in the terminal in different ways: as terminal ou
LuetCfg.Viper.BindPFlag("installed", cmd.Flags().Lookup("installed"))
LuetCfg.Viper.BindPFlag("solver.type", cmd.Flags().Lookup("solver-type"))
LuetCfg.Viper.BindPFlag("solver.discount", cmd.Flags().Lookup("solver-discount"))
LuetCfg.Viper.BindPFlag("system.database_engine", cmd.Flags().Lookup("system-engine"))
LuetCfg.Viper.BindPFlag("solver.rate", cmd.Flags().Lookup("solver-rate"))
LuetCfg.Viper.BindPFlag("solver.max_attempts", cmd.Flags().Lookup("solver-attempts"))
},
@@ -128,7 +333,14 @@ Search can also return results in the terminal in different ways: as terminal ou
searchWithLabelMatch, _ := cmd.Flags().GetBool("by-label-regex")
revdeps, _ := cmd.Flags().GetBool("revdeps")
tableMode, _ := cmd.Flags().GetBool("table")
files, _ := cmd.Flags().GetBool("files")
dbpath := LuetCfg.Viper.GetString("system.database_path")
rootfs := LuetCfg.Viper.GetString("system.rootfs")
engine := LuetCfg.Viper.GetString("system.database_engine")

LuetCfg.System.DatabaseEngine = engine
LuetCfg.System.DatabasePath = dbpath
LuetCfg.System.Rootfs = rootfs
out, _ := cmd.Flags().GetString("output")
if out != "terminal" {
LuetCfg.GetLogging().SetLogLevel("error")
@@ -144,121 +356,15 @@ Search can also return results in the terminal in different ways: as terminal ou
t.AppendHeader(rows)
Debug("Solver", LuetCfg.GetSolverOptions().CompactString())

if !installed {

repos := installer.Repositories{}
for _, repo := range LuetCfg.SystemRepositories {
if !repo.Enable {
continue
}
r := installer.NewSystemRepository(repo)
repos = append(repos, r)
}

inst := installer.NewLuetInstaller(
installer.LuetInstallerOptions{
Concurrency: LuetCfg.GetGeneral().Concurrency,
SolverOptions: *LuetCfg.GetSolverOptions(),
},
)
inst.Repositories(repos)
synced, err := inst.SyncRepositories(false)
if err != nil {
Fatal("Error: " + err.Error())
}

Info("--- Search results (" + args[0] + "): ---")

matches := []installer.PackageMatch{}
if searchWithLabel {
matches = synced.SearchLabel(args[0])
} else if searchWithLabelMatch {
matches = synced.SearchLabelMatch(args[0])
} else {
matches = synced.Search(args[0])
}
for _, m := range matches {
if !revdeps {
if !m.Package.IsHidden() || m.Package.IsHidden() && hidden {
t.AppendRow(packageToRow(m.Repo.GetName(), m.Package))
packageToList(l, m.Repo.GetName(), m.Package)
results.Packages = append(results.Packages,
PackageResult{
Name: m.Package.GetName(),
Version: m.Package.GetVersion(),
Category: m.Package.GetCategory(),
Repository: m.Repo.GetName(),
Hidden: m.Package.IsHidden(),
})
}
} else {
packs, _ := m.Repo.GetTree().GetDatabase().GetRevdeps(m.Package)
for _, revdep := range packs {
if !revdep.IsHidden() || revdep.IsHidden() && hidden {
t.AppendRow(packageToRow(m.Repo.GetName(), revdep))
packageToList(l, m.Repo.GetName(), revdep)
results.Packages = append(results.Packages,
PackageResult{
Name: revdep.GetName(),
Version: revdep.GetVersion(),
Category: revdep.GetCategory(),
Repository: m.Repo.GetName(),
Hidden: revdep.IsHidden(),
})
}
}
}
}
} else {

system := &installer.System{Database: LuetCfg.GetSystemDB(), Target: LuetCfg.GetSystem().Rootfs}

var err error
iMatches := pkg.Packages{}
if searchWithLabel {
iMatches, err = system.Database.FindPackageLabel(args[0])
} else if searchWithLabelMatch {
iMatches, err = system.Database.FindPackageLabelMatch(args[0])
} else {
iMatches, err = system.Database.FindPackageMatch(args[0])
}

if err != nil {
Fatal("Error: " + err.Error())
}

for _, pack := range iMatches {
if !revdeps {
if !pack.IsHidden() || pack.IsHidden() && hidden {
t.AppendRow(packageToRow("system", pack))
packageToList(l, "system", pack)
results.Packages = append(results.Packages,
PackageResult{
Name: pack.GetName(),
Version: pack.GetVersion(),
Category: pack.GetCategory(),
Repository: "system",
Hidden: pack.IsHidden(),
})
}
} else {
packs, _ := system.Database.GetRevdeps(pack)
for _, revdep := range packs {
if !revdep.IsHidden() || revdep.IsHidden() && hidden {
t.AppendRow(packageToRow("system", pack))
packageToList(l, "system", pack)
results.Packages = append(results.Packages,
PackageResult{
Name: revdep.GetName(),
Version: revdep.GetVersion(),
Category: revdep.GetCategory(),
Repository: "system",
Hidden: revdep.IsHidden(),
})
}
}
}
}
switch {
case files && installed:
results = searchLocalFiles(args[0], l, t)
case files && !installed:
results = searchFiles(args[0], l, t)
case !installed:
results = searchOnline(args[0], l, t, searchWithLabel, searchWithLabelMatch, revdeps, hidden)
default:
results = searchLocally(args[0], l, t, searchWithLabel, searchWithLabelMatch, revdeps, hidden)
}

t.AppendFooter(rows)
@@ -292,12 +398,10 @@ Search can also return results in the terminal in different ways: as terminal ou
}

func init() {
path, err := os.Getwd()
if err != nil {
Fatal(err)
}
searchCmd.Flags().String("system-dbpath", path, "System db path")
searchCmd.Flags().String("system-target", path, "System rootpath")
searchCmd.Flags().String("system-dbpath", "", "System db path")
searchCmd.Flags().String("system-target", "", "System rootpath")
searchCmd.Flags().String("system-engine", "", "System DB engine")

searchCmd.Flags().Bool("installed", false, "Search between system packages")
searchCmd.Flags().String("solver-type", "", "Solver strategy ( Defaults none, available: "+AvailableResolvers+" )")
searchCmd.Flags().StringP("output", "o", "terminal", "Output format ( Defaults: terminal, available: json,yaml )")
@@ -309,6 +413,7 @@ func init() {
searchCmd.Flags().Bool("revdeps", false, "Search package reverse dependencies")
searchCmd.Flags().Bool("hidden", false, "Include hidden packages")
searchCmd.Flags().Bool("table", false, "show output in a table (wider screens)")
searchCmd.Flags().Bool("files", false, "Search between packages files")

RootCmd.AddCommand(searchCmd)
}
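The searchFiles/searchLocalFiles helpers and the --files flag registered above enable searching packages by the files they ship. A possible invocation, not taken from the diff itself (the search term is illustrative):

    # search online repositories for the package owning a file (illustrative path)
    luet search --files /usr/bin/img

    # same lookup, restricted to installed packages, with JSON output
    luet search --installed --files /usr/bin/img -o json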
@@ -25,6 +25,7 @@ import (
helpers "github.com/mudler/luet/cmd/helpers"
"github.com/mudler/luet/pkg/compiler"
"github.com/mudler/luet/pkg/compiler/backend"
"github.com/mudler/luet/pkg/compiler/types/options"
. "github.com/mudler/luet/pkg/config"
. "github.com/mudler/luet/pkg/logger"
pkg "github.com/mudler/luet/pkg/package"
@@ -57,6 +58,7 @@ func NewTreeImageCommand() *cobra.Command {

treePath, _ := cmd.Flags().GetStringArray("tree")
imageRepository := viper.GetString("image-repository")
pullRepo, _ := cmd.Flags().GetStringArray("pull-repository")

out, _ := cmd.Flags().GetString("output")
if out != "terminal" {
@@ -73,12 +75,15 @@ func NewTreeImageCommand() *cobra.Command {
}
compilerBackend := backend.NewSimpleDockerBackend()

opts := compiler.NewDefaultCompilerOptions()
opts.SolverOptions = *LuetCfg.GetSolverOptions()
opts.ImageRepository = imageRepository

solverOpts := solver.Options{Type: solver.SingleCoreSimple, Concurrency: 1}
luetCompiler := compiler.NewLuetCompiler(compilerBackend, reciper.GetDatabase(), opts, solverOpts)
opts := *LuetCfg.GetSolverOptions()
opts.Options = solver.Options{Type: solver.SingleCoreSimple, Concurrency: 1}
luetCompiler := compiler.NewLuetCompiler(
compilerBackend,
reciper.GetDatabase(),
options.WithPushRepository(imageRepository),
options.WithPullRepositories(pullRepo),
options.WithSolverOptions(opts),
)

a := args[0]

@@ -135,6 +140,7 @@ func NewTreeImageCommand() *cobra.Command {
ans.Flags().StringP("output", "o", "terminal", "Output format ( Defaults: terminal, available: json,yaml )")
ans.Flags().StringArrayP("tree", "t", []string{path}, "Path of the tree to use.")
ans.Flags().String("image-repository", "luet/cache", "Default base image string for generated image")
ans.Flags().StringArrayP("pull-repository", "p", []string{}, "A list of repositories to pull the cache from")

return ans
}
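The hunk above wires the new --pull-repository flag into the compiler via options.WithPullRepositories. Assuming the command is exposed as `luet tree images` (its Use string is not part of this hunk), an illustrative invocation could be:

    # subcommand name assumed; tree path, cache repository and package are placeholders
    luet tree images -t ./packages-tree -p quay.io/example/build-cache example/package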
@@ -244,10 +244,33 @@ func validatePackage(p pkg.Package, checkType string, opts *ValidateOpts, recipe
Spinner(32)
solution, err := depSolver.Install(pkg.Packages{r})
ass := solution.SearchByName(r.GetPackageName())
if err == nil {
_, err = solution.Order(reciper.GetDatabase(), ass.Package.GetFingerPrint())
}
SpinnerStop()
if err == nil {
if ass == nil {

ans = errors.New(
fmt.Sprintf("[%9s] %s/%s-%s: solution doesn't retrieve package %s/%s-%s.",
checkType,
p.GetCategory(), p.GetName(), p.GetVersion(),
r.GetCategory(), r.GetName(), r.GetVersion(),
))

if LuetCfg.GetGeneral().Debug {
for idx, pa := range solution {
fmt.Println(fmt.Sprintf("[%9s] %s/%s-%s: solution %d: %s",
checkType,
p.GetCategory(), p.GetName(), p.GetVersion(), idx,
pa.Package.GetPackageName()))
}
}

Error(ans.Error())
opts.IncrBrokenDeps()
validpkg = false
} else {
_, err = solution.Order(reciper.GetDatabase(), ass.Package.GetFingerPrint())
}
}

if err != nil {
@@ -15,8 +15,6 @@
package cmd

import (
"os"

helpers "github.com/mudler/luet/cmd/helpers"
. "github.com/mudler/luet/pkg/config"
installer "github.com/mudler/luet/pkg/installer"
@@ -42,6 +40,7 @@ var uninstallCmd = &cobra.Command{
LuetCfg.Viper.BindPFlag("nodeps", cmd.Flags().Lookup("nodeps"))
LuetCfg.Viper.BindPFlag("force", cmd.Flags().Lookup("force"))
LuetCfg.Viper.BindPFlag("yes", cmd.Flags().Lookup("yes"))
LuetCfg.Viper.BindPFlag("system.database_engine", cmd.Flags().Lookup("system-engine"))
},
Run: func(cmd *cobra.Command, args []string) {
toRemove := []pkg.Package{}
@@ -65,6 +64,13 @@ var uninstallCmd = &cobra.Command{
fullClean, _ := cmd.Flags().GetBool("full-clean")
concurrent, _ := cmd.Flags().GetBool("solver-concurrent")
yes := LuetCfg.Viper.GetBool("yes")
dbpath := LuetCfg.Viper.GetString("system.database_path")
rootfs := LuetCfg.Viper.GetString("system.rootfs")
engine := LuetCfg.Viper.GetString("system.database_engine")

LuetCfg.System.DatabaseEngine = engine
LuetCfg.System.DatabasePath = dbpath
LuetCfg.System.Rootfs = rootfs

LuetCfg.GetSolverOptions().Type = stype
LuetCfg.GetSolverOptions().LearnRate = float32(rate)
@@ -101,12 +107,11 @@ var uninstallCmd = &cobra.Command{
}

func init() {
path, err := os.Getwd()
if err != nil {
Fatal(err)
}
uninstallCmd.Flags().String("system-dbpath", path, "System db path")
uninstallCmd.Flags().String("system-target", path, "System rootpath")

uninstallCmd.Flags().String("system-dbpath", "", "System db path")
uninstallCmd.Flags().String("system-target", "", "System rootpath")
uninstallCmd.Flags().String("system-engine", "", "System DB engine")

uninstallCmd.Flags().String("solver-type", "", "Solver strategy ( Defaults none, available: "+AvailableResolvers+" )")
uninstallCmd.Flags().Float32("solver-rate", 0.7, "Solver learning rate")
uninstallCmd.Flags().Float32("solver-discount", 1.0, "Solver discount rate")
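With the system-* flags registered above, uninstall can be pointed at an alternate rootfs and package database. The paths and package name below are placeholders, not taken from the diff:

    # illustrative values only
    luet uninstall --system-target /mnt/target --system-dbpath /mnt/target/var/luet/db category/package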
@@ -15,8 +15,6 @@
package cmd

import (
"os"

. "github.com/mudler/luet/pkg/config"
installer "github.com/mudler/luet/pkg/installer"
. "github.com/mudler/luet/pkg/logger"
@@ -38,6 +36,7 @@ var upgradeCmd = &cobra.Command{
LuetCfg.Viper.BindPFlag("solver.max_attempts", cmd.Flags().Lookup("solver-attempts"))
LuetCfg.Viper.BindPFlag("force", cmd.Flags().Lookup("force"))
LuetCfg.Viper.BindPFlag("yes", cmd.Flags().Lookup("yes"))
LuetCfg.Viper.BindPFlag("system.database_engine", cmd.Flags().Lookup("system-engine"))
},
Long: `Upgrades packages in parallel`,
Run: func(cmd *cobra.Command, args []string) {
@@ -64,7 +63,14 @@ var upgradeCmd = &cobra.Command{
sync, _ := cmd.Flags().GetBool("sync")
concurrent, _ := cmd.Flags().GetBool("solver-concurrent")
yes := LuetCfg.Viper.GetBool("yes")
dbpath := LuetCfg.Viper.GetString("system.database_path")
rootfs := LuetCfg.Viper.GetString("system.rootfs")
engine := LuetCfg.Viper.GetString("system.database_engine")
downloadOnly, _ := cmd.Flags().GetBool("download-only")

LuetCfg.System.DatabaseEngine = engine
LuetCfg.System.DatabasePath = dbpath
LuetCfg.System.Rootfs = rootfs
LuetCfg.GetSolverOptions().Type = stype
LuetCfg.GetSolverOptions().LearnRate = float32(rate)
LuetCfg.GetSolverOptions().Discount = float32(discount)
@@ -91,6 +97,7 @@ var upgradeCmd = &cobra.Command{
UpgradeNewRevisions: sync,
PreserveSystemEssentialData: true,
Ask: !yes,
DownloadOnly: downloadOnly,
})
inst.Repositories(repos)

@@ -102,12 +109,10 @@ var upgradeCmd = &cobra.Command{
}

func init() {
path, err := os.Getwd()
if err != nil {
Fatal(err)
}
upgradeCmd.Flags().String("system-dbpath", path, "System db path")
upgradeCmd.Flags().String("system-target", path, "System rootpath")
upgradeCmd.Flags().String("system-dbpath", "", "System db path")
upgradeCmd.Flags().String("system-target", "", "System rootpath")
upgradeCmd.Flags().String("system-engine", "", "System DB engine")

upgradeCmd.Flags().String("solver-type", "", "Solver strategy ( Defaults none, available: "+AvailableResolvers+" )")
upgradeCmd.Flags().Float32("solver-rate", 0.7, "Solver learning rate")
upgradeCmd.Flags().Float32("solver-discount", 1.0, "Solver discount rate")
@@ -120,6 +125,7 @@ func init() {
upgradeCmd.Flags().Bool("sync", false, "Upgrade packages with new revisions (experimental)")
upgradeCmd.Flags().Bool("solver-concurrent", false, "Use concurrent solver (experimental)")
upgradeCmd.Flags().BoolP("yes", "y", false, "Don't ask questions")
upgradeCmd.Flags().Bool("download-only", false, "Download only")

RootCmd.AddCommand(upgradeCmd)
}
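The --download-only and -y flags added above can be combined to pre-fetch upgrade artifacts non-interactively, for example:

    # fetch upgrade artifacts without applying them, skipping confirmation prompts
    luet upgrade --download-only -y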
36 cmd/util.go Normal file
@@ -0,0 +1,36 @@
// Copyright © 2020 Ettore Di Giacinto <mudler@gentoo.org>
// Daniele Rondina <geaaru@sabayonlinux.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.

package cmd

import (
. "github.com/mudler/luet/cmd/util"

"github.com/spf13/cobra"
)

var utilGroup = &cobra.Command{
Use: "util [command] [OPTIONS]",
Short: "General luet internal utilities exposed",
}

func init() {
RootCmd.AddCommand(utilGroup)

utilGroup.AddCommand(
NewUnpackCommand(),
)
}
98 cmd/util/unpack.go Normal file
@@ -0,0 +1,98 @@
// Copyright © 2021 Ettore Di Giacinto <mudler@mocaccino.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.

package util

import (
"fmt"
"os"
"path/filepath"

"github.com/docker/docker/api/types"
"github.com/docker/go-units"
"github.com/mudler/luet/pkg/config"
"github.com/mudler/luet/pkg/helpers"
. "github.com/mudler/luet/pkg/logger"

"github.com/spf13/cobra"
)

func NewUnpackCommand() *cobra.Command {

c := &cobra.Command{
Use: "unpack image path",
Short: "Unpack a docker image natively",
Long: `unpack doesn't need the docker daemon to run, and unpacks a docker image in the specified directory:

luet util unpack golang:alpine /alpine
`,
PreRun: func(cmd *cobra.Command, args []string) {

if len(args) != 2 {
Fatal("Expects an image and a path")
}

},
Run: func(cmd *cobra.Command, args []string) {

image := args[0]
destination, err := filepath.Abs(args[1])
if err != nil {
Error("Invalid path %s", destination)
os.Exit(1)
}

verify, _ := cmd.Flags().GetBool("verify")
user, _ := cmd.Flags().GetString("auth-username")
pass, _ := cmd.Flags().GetString("auth-password")
authType, _ := cmd.Flags().GetString("auth-type")
server, _ := cmd.Flags().GetString("auth-server-address")
identity, _ := cmd.Flags().GetString("auth-identity-token")
registryToken, _ := cmd.Flags().GetString("auth-registry-token")

temp, err := config.LuetCfg.GetSystem().TempDir("contentstore")
if err != nil {
Fatal("Cannot create a tempdir", err.Error())
}

Info("Downloading", image, "to", destination)
auth := &types.AuthConfig{
Username: user,
Password: pass,
ServerAddress: server,
Auth: authType,
IdentityToken: identity,
RegistryToken: registryToken,
}

info, err := helpers.DownloadAndExtractDockerImage(temp, image, destination, auth, verify)
if err != nil {
Error(err.Error())
os.Exit(1)
}
Info(fmt.Sprintf("Pulled: %s %s", info.Target.Digest, info.Name))
Info(fmt.Sprintf("Size: %s", units.BytesSize(float64(info.ContentSize))))
},
}

c.Flags().String("auth-username", "", "Username to authenticate to registry/notary")
c.Flags().String("auth-password", "", "Password to authenticate to registry")
c.Flags().String("auth-type", "", "Auth type")
c.Flags().String("auth-server-address", "", "Authentication server address")
c.Flags().String("auth-identity-token", "", "Authentication identity token")
c.Flags().String("auth-registry-token", "", "Authentication registry token")
c.Flags().Bool("verify", false, "Verify signed images to notary before to pull")
return c
}
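Besides the daemon-less example already given in the command help (luet util unpack golang:alpine /alpine), the auth flags registered above allow pulling from a private registry. The registry, credentials, and target path below are placeholders only:

    # illustrative private-registry pull; values are not from the diff
    luet util unpack \
      --auth-username myuser \
      --auth-password mypass \
      --auth-server-address registry.example.com \
      registry.example.com/org/image:latest /tmp/rootfs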
21 go.mod
@@ -4,12 +4,14 @@ go 1.14

require (
github.com/DataDog/zstd v1.4.4 // indirect
github.com/Sabayon/pkgs-checker v0.7.2
github.com/Sabayon/pkgs-checker v0.8.1
github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef
github.com/asdine/storm v0.0.0-20190418133842-e0f77eada154
github.com/briandowns/spinner v1.7.0
github.com/briandowns/spinner v1.12.1-0.20201220203425-e201aaea0a31
github.com/cavaliercoder/grab v1.0.1-0.20201108051000-98a5bfe305ec
github.com/containerd/containerd v1.4.1-0.20201117152358-0edc412565dc
github.com/crillab/gophersat v1.3.2-0.20201023142334-3fc2ac466765
github.com/docker/cli v0.0.0-20200227165822-2298e6a3fe24
github.com/docker/distribution v2.7.1+incompatible
github.com/docker/docker v20.10.0-beta1.0.20201110211921-af34b94a78a1+incompatible
github.com/docker/go-units v0.4.0
@@ -18,8 +20,10 @@ require (
github.com/genuinetools/img v0.5.11
github.com/ghodss/yaml v1.0.0
github.com/google/go-containerregistry v0.2.1
github.com/google/renameio v1.0.0
github.com/hashicorp/go-multierror v1.0.0
github.com/hashicorp/go-version v1.2.0
github.com/hashicorp/go-version v1.2.1
github.com/imdario/mergo v0.3.8
github.com/jedib0t/go-pretty v4.3.0+incompatible
github.com/jedib0t/go-pretty/v6 v6.0.5
github.com/jinzhu/copier v0.0.0-20180308034124-7e38e58719c3
@@ -37,18 +41,21 @@ require (
github.com/mudler/topsort v0.0.0-20201103161459-db5c7901c290
github.com/onsi/ginkgo v1.14.2
github.com/onsi/gomega v1.10.3
github.com/opencontainers/go-digest v1.0.0
github.com/opencontainers/image-spec v1.0.1
github.com/otiai10/copy v1.2.1-0.20200916181228-26f84a0b1578
github.com/philopon/go-toposort v0.0.0-20170620085441-9be86dbd762f
github.com/pkg/errors v0.9.1
github.com/schollz/progressbar/v3 v3.7.1
github.com/sirupsen/logrus v1.6.0
github.com/spf13/cobra v1.0.0
github.com/spf13/viper v1.7.0
github.com/sirupsen/logrus v1.7.0
github.com/spf13/cobra v1.1.1
github.com/spf13/viper v1.7.1
github.com/theupdateframework/notary v0.7.0
go.etcd.io/bbolt v1.3.5
go.uber.org/atomic v1.5.1 // indirect
go.uber.org/multierr v1.4.0 // indirect
go.uber.org/multierr v1.4.0
go.uber.org/zap v1.13.0
google.golang.org/grpc v1.29.1
gopkg.in/yaml.v2 v2.3.0
helm.sh/helm/v3 v3.3.4
75 go.sum
@@ -81,6 +81,8 @@ github.com/Microsoft/go-winio v0.4.15-0.20200113171025-3fe6c5262873/go.mod h1:tT
|
||||
github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
|
||||
github.com/Microsoft/hcsshim v0.8.7 h1:ptnOoufxGSzauVTsdE+wMYnCWA301PdoN4xg5oRdZpg=
|
||||
github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ=
|
||||
github.com/MottainaiCI/simplestreams-builder v0.1.0 h1:A8KJN22Xkx7NUKC9/zWmd1UhIqRn3bdHo0wv/HsAHx8=
|
||||
github.com/MottainaiCI/simplestreams-builder v0.1.0/go.mod h1:+Gbv6dg6TPHWq4oDjZY1vn978PLCEZ2hOu8kvn+S7t4=
|
||||
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
|
||||
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk=
|
||||
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
|
||||
@@ -91,8 +93,11 @@ github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdko
|
||||
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
|
||||
github.com/Sabayon/pkgs-checker v0.7.2 h1:mh53u5D7FTCeBJevYQA9cCxAWGTSuKqw7m/x7GsQVb0=
|
||||
github.com/Sabayon/pkgs-checker v0.7.2/go.mod h1:GFGM6ZzSE5owdGgjLnulj0+Vt9UTd5LFGmB2AOVPYrE=
|
||||
github.com/Sabayon/pkgs-checker v0.8.1 h1:pVen975z9WIecq7luntUn+0XzGdiyz2CsDay8w+ZmOw=
|
||||
github.com/Sabayon/pkgs-checker v0.8.1/go.mod h1:GC9PBUzcq0QVEBGRA1IIMXf6wHxo34KH5BeqoyJsLpo=
|
||||
github.com/Sereal/Sereal v0.0.0-20181211220259-509a78ddbda3 h1:Xu7z47ZiE/J+sKXHZMGxEor/oY2q6dq51fkO0JqdSwY=
|
||||
github.com/Sereal/Sereal v0.0.0-20181211220259-509a78ddbda3/go.mod h1:D0JMgToj/WdxCgd30Kc1UcA9E+WdZoJqeVOuYW7iTBM=
|
||||
github.com/Shopify/logrus-bugsnag v0.0.0-20170309145241-6dbc35f2c30d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
|
||||
github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
|
||||
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
|
||||
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
|
||||
@@ -121,6 +126,8 @@ github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:l
|
||||
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
|
||||
github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535 h1:4daAzAu0S6Vi7/lbWECcX0j45yZReDZ56BQsrVBOEEY=
|
||||
github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg=
|
||||
github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef h1:46PFijGLmAjMPwCCCo7Jf0W6f9slllCkkv7vyc1yOSg=
|
||||
github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
|
||||
github.com/asdine/storm v0.0.0-20190418133842-e0f77eada154 h1:2lbe+CPe6eQf2EA3jjLdLFZKGv3cbYqVIDjKnzcyOXg=
|
||||
github.com/asdine/storm v0.0.0-20190418133842-e0f77eada154/go.mod h1:cMLKpjHSP4q0P133fV15ojQgwWWB2IMv+hrFsmBF/wI=
|
||||
github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU=
|
||||
@@ -132,20 +139,24 @@ github.com/aws/aws-sdk-go v1.28.2/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN
|
||||
github.com/aws/aws-sdk-go v1.31.6/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
|
||||
github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
|
||||
github.com/aybabtme/rgbterm v0.0.0-20170906152045-cc83f3b3ce59/go.mod h1:q/89r3U2H7sSsE2t6Kca0lfwTK8JdoNGS/yzM/4iH5I=
|
||||
github.com/beorn7/perks v0.0.0-20150223135152-b965b613227f/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
|
||||
github.com/bitly/go-hostpool v0.1.0/go.mod h1:4gOCgp6+NZnVqlKyZ/iBZFTAJKembaVENUpMkpg42fw=
|
||||
github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA=
|
||||
github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
|
||||
github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
|
||||
github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
|
||||
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
|
||||
github.com/briandowns/spinner v1.7.0 h1:aan1hBBOoscry2TXAkgtxkJiq7Se0+9pt+TUWaPrB4g=
|
||||
github.com/briandowns/spinner v1.7.0/go.mod h1://Zf9tMcxfRUA36V23M6YGEAv+kECGfvpnLTnb8n4XQ=
|
||||
github.com/briandowns/spinner v1.12.1-0.20201220203425-e201aaea0a31 h1:yInAg9pE5qGec5eQ7XdfOTTaGwGxD3bKFVjmD6VKkwc=
|
||||
github.com/briandowns/spinner v1.12.1-0.20201220203425-e201aaea0a31/go.mod h1:QOuQk7x+EaDASo80FEXwlwiA+j/PPIcX3FScO+3/ZPQ=
|
||||
github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk=
|
||||
github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8=
|
||||
github.com/bugsnag/bugsnag-go v1.0.5-0.20150529004307-13fd6b8acda0/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8=
|
||||
github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50=
|
||||
github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE=
|
||||
github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ=
|
||||
@@ -153,7 +164,9 @@ github.com/cavaliercoder/grab v1.0.1-0.20201108051000-98a5bfe305ec h1:4XvMn0XuV7
|
||||
github.com/cavaliercoder/grab v1.0.1-0.20201108051000-98a5bfe305ec/go.mod h1:NbXoa59CCAGqtRm7kRrcZIk2dTCJMRVF8QI3BOD7isY=
|
||||
github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
|
||||
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
|
||||
github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
|
||||
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5/go.mod h1:/iP1qXHoty45bqomnu2LM+VVyAEdWN+vtSHGlQgyxbw=
|
||||
github.com/chuckpreslar/emission v0.0.0-20170206194824-a7ddd980baf9 h1:xz6Nv3zcwO2Lila35hcb0QloCQsc38Al13RNEzWRpX4=
|
||||
@@ -164,6 +177,7 @@ github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMn
|
||||
github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg=
|
||||
github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/cloudflare/cfssl v0.0.0-20180223231731-4e2dcbde5004/go.mod h1:yMWuSON2oQp+43nFtAV/uvKQIFpSPerB57DCt9t8sSA=
|
||||
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
|
||||
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
|
||||
github.com/codahale/hdrhistogram v0.0.0-20160425231609-f8ad88b59a58/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
|
||||
@@ -241,6 +255,7 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
|
||||
github.com/daviddengcn/go-colortext v0.0.0-20160507010035-511bcaf42ccd/go.mod h1:dv4zxwHi5C/8AeI+4gX4dCWOIvNi7I6JCSX0HvlKPgE=
|
||||
github.com/deislabs/oras v0.8.1/go.mod h1:Mx0rMSbBNaNfY9hjpccEnxkOqJL6KGjtxNHPLC4G4As=
|
||||
github.com/denisenkom/go-mssqldb v0.0.0-20191001013358-cfbb681360f0/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU=
|
||||
github.com/denisenkom/go-mssqldb v0.0.0-20191128021309-1d7a30a10f73/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU=
|
||||
github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0=
|
||||
github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
|
||||
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
|
||||
@@ -263,12 +278,15 @@ github.com/docker/docker-credential-helpers v0.6.0/go.mod h1:WRaJzqw3CTB9bk10avu
|
||||
github.com/docker/docker-credential-helpers v0.6.1/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y=
|
||||
github.com/docker/docker-credential-helpers v0.6.3 h1:zI2p9+1NQYdnG6sMU26EX4aVGlqbInSQxQXLvzJ4RPQ=
|
||||
github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y=
|
||||
github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c h1:lzqkGL9b3znc+ZUgi7FlLnqjQhcXxkNM/quxIjBVMD0=
|
||||
github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c/go.mod h1:CADgU4DSXK5QUlFslkQu2yW2TKzFZcXq/leZfM0UH5Q=
|
||||
github.com/docker/go-connections v0.0.0-20180821093606-97c2040d34df/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
|
||||
github.com/docker/go-connections v0.3.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
|
||||
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
|
||||
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
|
||||
github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8=
|
||||
github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
|
||||
github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916 h1:yWHOI+vFjEsAakUTSrtqc/SAHrhSkmn48pqjidZX3QA=
|
||||
github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI=
|
||||
github.com/docker/go-units v0.3.1/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
|
||||
github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
|
||||
@@ -282,6 +300,7 @@ github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNE
|
||||
github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
|
||||
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
||||
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
||||
github.com/dvsekhvalnov/jose2go v0.0.0-20170216131308-f21a8cedbbae/go.mod h1:7BvyPhdbLxMXIYTFPLsyJRFMsKmOZnQmzh6Gb+uquuM=
|
||||
github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
|
||||
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
|
||||
github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
|
||||
@@ -296,6 +315,7 @@ github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymF
|
||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5/go.mod h1:a2zkGnVExMxdzMo3M0Hi/3sEU+cWnZpSni0O6/Yb/P0=
|
||||
github.com/evanphx/json-patch v0.0.0-20200808040245-162e5629780b/go.mod h1:NAJj0yf/KaRKURN6nyi7A9IZydMivZEm9oQLWNjfKDc=
|
||||
github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4=
|
||||
github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc=
|
||||
@@ -377,11 +397,14 @@ github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh
|
||||
github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4=
|
||||
github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA=
|
||||
github.com/go-openapi/validate v0.19.5/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4=
|
||||
github.com/go-sql-driver/mysql v1.3.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
|
||||
github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
|
||||
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
|
||||
github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
|
||||
github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/go-yaml/yaml v2.1.0+incompatible h1:RYi2hDdss1u4YE7GwixGzWwVo47T8UQwnTLB6vQiq+o=
|
||||
github.com/go-yaml/yaml v2.1.0+incompatible/go.mod h1:w2MrLa16VYP0jy6N7M5kHaCkaLENm+P+Tv+MfurjSw0=
|
||||
github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI=
|
||||
github.com/gobuffalo/envy v1.7.1/go.mod h1:FurDp9+EDPE4aIUS3ZLyD+7/9fpx7YRt/ukY6jIHf0w=
|
||||
github.com/gobuffalo/logger v1.0.1/go.mod h1:2zbswyIUa45I+c+FLXuWl9zSWEiVuthsk8ze5s8JvPs=
|
||||
@@ -400,6 +423,7 @@ github.com/gofrs/flock v0.7.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14j
|
||||
github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
|
||||
github.com/gogo/googleapis v1.3.2 h1:kX1es4djPJrsDhY7aZKJy7aZasdcB5oSOEphMjSB53c=
|
||||
github.com/gogo/googleapis v1.3.2/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c=
|
||||
github.com/gogo/protobuf v1.0.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE=
|
||||
@@ -430,6 +454,7 @@ github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y
|
||||
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
|
||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
|
||||
github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
|
||||
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
|
||||
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
|
||||
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
|
||||
@@ -445,6 +470,7 @@ github.com/golangplus/fmt v0.0.0-20150411045040-2a5d6d7d2995/go.mod h1:lJgMEyOkY
|
||||
github.com/golangplus/testing v0.0.0-20180327235837-af21d9c3145e/go.mod h1:0AA//k/eakGydO4jKRoRL2j92ZKSzTgj9tclaCrvXHk=
|
||||
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/certificate-transparency-go v1.0.10-0.20180222191210-5ab67e519c93/go.mod h1:QeJfpSbVSfYc7RgB3gJFj9cbuQMMchQxrWXz8Ruopmg=
|
||||
github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY=
|
||||
@@ -467,6 +493,8 @@ github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hf
|
||||
github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
|
||||
github.com/google/renameio v1.0.0 h1:xhp2CnJmgQmpJU4RY8chagahUq5mbPPAbiSQstKpVMA=
|
||||
github.com/google/renameio v1.0.0/go.mod h1:t/HQoYBZSsWSNK35C6CO/TpPLDVWvxOHboWUAweKUpk=
|
||||
github.com/google/shlex v0.0.0-20150127133951-6f45313302b9 h1:JM174NTeGNJ2m/oLH3UOWOvWQQKd+BoL3hcSCUWFLt0=
|
||||
github.com/google/shlex v0.0.0-20150127133951-6f45313302b9/go.mod h1:RpwtwJQFrIEPstU94h88MWPXP2ektJZ8cZ0YntAmXiE=
|
||||
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
@@ -485,6 +513,7 @@ github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORR
|
||||
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
|
||||
github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ=
|
||||
github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
|
||||
github.com/gorilla/mux v1.7.0/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
|
||||
github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
|
||||
github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
|
||||
github.com/gorilla/mux v1.7.4 h1:VuZ8uybHlWmqV03+zRzdwKL4tUnIp1MAQtp1mIFE1bc=
|
||||
@@ -503,6 +532,7 @@ github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
|
||||
github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 h1:MJG/KsmcqMwFAkh8mTnAwhyKoB+sTAnY4CACC110tbU=
|
||||
github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw=
|
||||
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4=
|
||||
github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
|
||||
github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE=
|
||||
github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
|
||||
@@ -521,6 +551,8 @@ github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdv
|
||||
github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
|
||||
github.com/hashicorp/go-version v1.2.0 h1:3vNe/fWF5CBgRIguda1meWhsZHy3m8gCJ5wx+dIzX/E=
|
||||
github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
|
||||
github.com/hashicorp/go-version v1.2.1 h1:zEfKbn2+PDgroKdiOzqiE8rsmLqU2uwi5PB5pBJ3TkI=
|
||||
github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
|
||||
github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
|
||||
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=
|
||||
@@ -559,6 +591,9 @@ github.com/jedib0t/go-pretty/v6 v6.0.5/go.mod h1:MTr6FgcfNdnN5wPVBzJ6mhJeDyiF0yB
|
||||
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
|
||||
github.com/jinzhu/copier v0.0.0-20180308034124-7e38e58719c3 h1:sHsPfNMAG70QAvKbddQ0uScZCHQoZsT5NykGRCeeeIs=
|
||||
github.com/jinzhu/copier v0.0.0-20180308034124-7e38e58719c3/go.mod h1:yL958EeXv8Ylng6IfnvG4oflryUi3vgA3xPs9hmII1s=
|
||||
github.com/jinzhu/gorm v0.0.0-20170222002820-5409931a1bb8/go.mod h1:Vla75njaFJ8clLU1W44h34PjIkijhjHIYnZxMqCdxqo=
|
||||
github.com/jinzhu/inflection v0.0.0-20170102125226-1c35d901db3d/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
|
||||
github.com/jinzhu/now v1.1.1/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
|
||||
github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
|
||||
github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
|
||||
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
|
||||
@@ -578,6 +613,7 @@ github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1
|
||||
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
|
||||
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
|
||||
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
|
||||
github.com/juju/loggo v0.0.0-20190526231331-6e530bcce5d8/go.mod h1:vgyd7OREkbtVEN/8IXZe5Ooef3LQePvuBm9UWj6ZL8U=
|
||||
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
||||
github.com/k0kubun/go-ansi v0.0.0-20180517002512-3bf9e2903213/go.mod h1:vNUNkEQ1e29fT/6vq2aBdFsgNPmy8qMdSay1npru+Sw=
|
||||
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
|
||||
@@ -610,6 +646,7 @@ github.com/kyokomi/emoji v2.1.0+incompatible h1:+DYU2RgpI6OHG4oQkM5KlqD3Wd3UPEsX
|
||||
github.com/kyokomi/emoji v2.1.0+incompatible/go.mod h1:mZ6aGCD7yk8j6QY6KICwnZ2pxoszVseX1DNoGtU2tBA=
|
||||
github.com/lann/builder v0.0.0-20180802200727-47ae307949d0/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o=
|
||||
github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0/go.mod h1:vmVJ0l/dxyfGW6FmdpVm2joNMFikkuWg0EoCKLGUMNw=
|
||||
github.com/lib/pq v0.0.0-20150723085316-0dad96c0b94f/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||
github.com/lib/pq v1.7.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
|
||||
@@ -620,6 +657,7 @@ github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z
|
||||
github.com/logrusorgru/aurora v0.0.0-20190417123914-21d75270181e h1:yRWBTwWfMy5YPjT14Jr+p12ygqLpM9K5ojbbNPSd8hI=
|
||||
github.com/logrusorgru/aurora v0.0.0-20190417123914-21d75270181e/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4=
|
||||
github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ=
|
||||
github.com/magiconair/properties v1.5.3/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
|
||||
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
|
||||
github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4=
|
||||
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
|
||||
@@ -649,13 +687,19 @@ github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzp
|
||||
github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0=
|
||||
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
|
||||
github.com/mattn/go-shellwords v1.0.10/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
|
||||
github.com/mattn/go-sqlite3 v1.6.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
|
||||
github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
|
||||
github.com/mattn/go-sqlite3 v1.12.0 h1:u/x3mp++qUxvYfulZ4HKOvVO0JWhk7HtE8lWhbGz/Do=
|
||||
github.com/mattn/go-sqlite3 v1.12.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
|
||||
github.com/mattn/go-sqlite3 v1.14.4 h1:4rQjbDxdu9fSgI/r3KN72G3c2goxknAqHHgPWWs8UlI=
|
||||
github.com/mattn/go-sqlite3 v1.14.4/go.mod h1:WVKg1VTActs4Qso6iwGbiFih2UIHo0ENGwNd0Lj+XmI=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY=
|
||||
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE=
|
||||
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
|
||||
github.com/miekg/pkcs11 v1.0.2 h1:CIBkOawOtzJNE0B+EpRiUBzuVW7JEQAwdwhSS6YhIeg=
|
||||
github.com/miekg/pkcs11 v1.0.2/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
|
||||
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
|
||||
github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db h1:62I3jR2EmQ4l5rM/4FEfDWcRD+abF5XlKShorW5LRoQ=
|
||||
github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db/go.mod h1:l0dey0ia/Uv7NcFFVbCLtqEBQbrT4OCwCSKTEv6enCw=
|
||||
@@ -670,6 +714,7 @@ github.com/mitchellh/hashstructure v0.0.0-20170609045927-2bca23e0e452/go.mod h1:
|
||||
github.com/mitchellh/hashstructure v1.0.0 h1:ZkRJX1CyOoTkar7p/mLS5TZU4nJ1Rn/F8u9dGS02Q3Y=
|
||||
github.com/mitchellh/hashstructure v1.0.0/go.mod h1:QjSHrPWS+BGUVBYkbTZWEnOh3G1DutKwClXU/ABz6AQ=
|
||||
github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
|
||||
github.com/mitchellh/mapstructure v0.0.0-20150613213606-2caf8efc9366/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||
github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
|
||||
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||
@@ -820,10 +865,12 @@ github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndr
|
||||
github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
|
||||
github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_golang v0.0.0-20180924113449-f69c853d21c1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_golang v0.9.0-pre1.0.20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
|
||||
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
|
||||
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
|
||||
github.com/prometheus/client_golang v1.3.0 h1:miYCvYqFXtl/J9FIy8eNpBfYthAEFg+Ys0XyUVEcDsc=
|
||||
github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og=
|
||||
github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
@@ -831,6 +878,7 @@ github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:
|
||||
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
|
||||
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
|
||||
github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
|
||||
@@ -838,6 +886,7 @@ github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7q
|
||||
github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/common v0.7.0 h1:L+1lyG48J1zAQXA3RBX/nG/B3gjlHq0zTt2tlbJLyCY=
|
||||
github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA=
|
||||
github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.0-20180920065004-418d78d0b9a7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
@@ -889,6 +938,8 @@ github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4
|
||||
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||
github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I=
|
||||
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
|
||||
github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM=
|
||||
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
|
||||
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
|
||||
github.com/smartystreets/assertions v1.0.0/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM=
|
||||
github.com/smartystreets/assertions v1.0.1 h1:voD4ITNjPL5jjBfgR/r8fPIIBrliWrWHeiJApdr3r4w=
|
||||
@@ -905,30 +956,39 @@ github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasO
|
||||
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
|
||||
github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc=
|
||||
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
|
||||
github.com/spf13/cast v0.0.0-20150508191742-4d07383ffe94/go.mod h1:r2rcYCSwa1IExKTDiTfzaxqT2FNHs8hODu4LnUfgKEg=
|
||||
github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8=
|
||||
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
|
||||
github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng=
|
||||
github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
|
||||
github.com/spf13/cobra v0.0.1/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
|
||||
github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
|
||||
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
|
||||
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
|
||||
github.com/spf13/cobra v1.0.0 h1:6m/oheQuQ13N9ks4hubMG6BnvwOeaJrqSPLahSnczz8=
|
||||
github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
|
||||
github.com/spf13/cobra v1.1.1 h1:KfztREH0tPxJJ+geloSLaAkaPkr4ki2Er5quFV1TDo4=
|
||||
github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI=
|
||||
github.com/spf13/jwalterweatherman v0.0.0-20141219030609-3d60171a6431/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
|
||||
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
|
||||
github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
|
||||
github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
|
||||
github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/spf13/pflag v1.0.0/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
||||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/spf13/viper v0.0.0-20150530192845-be5ff3e4840c/go.mod h1:A8kyI5cUJhb8N+3pkfONlcEcZbueH6nhAm0Fq7SrnBM=
|
||||
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
|
||||
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
|
||||
github.com/spf13/viper v1.6.3 h1:pDDu1OyEDTKzpJwdq4TiuLyMsUgRa/BT5cn5O62NoHs=
|
||||
github.com/spf13/viper v1.6.3/go.mod h1:jUMtyi0/lB5yZH/FjyGAoH7IMNrIhlBf6pXZmbMDvzw=
|
||||
github.com/spf13/viper v1.7.0 h1:xVKxvI7ouOI5I+U9s2eeiUfMaWBVoXA3AWskkrqK0VM=
|
||||
github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
|
||||
github.com/spf13/viper v1.7.1 h1:pM5oEahlgWv/WnHXpgbKz7iLIxRf65tye2Ci+XFK5sk=
|
||||
github.com/spf13/viper v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
|
||||
github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
|
||||
github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
|
||||
github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI=
|
||||
@@ -951,6 +1011,8 @@ github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69
|
||||
github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
|
||||
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2 h1:b6uOv7YOFK0TYG7HtkIgExQo+2RdLuwRft63jn2HWj8=
|
||||
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
|
||||
github.com/theupdateframework/notary v0.7.0 h1:QyagRZ7wlSpjT5N2qQAh/pN+DVqgekv4DzbAiAiEL3c=
|
||||
github.com/theupdateframework/notary v0.7.0/go.mod h1:c9DRxcmhHmVLDay4/2fUYdISnHqbFDGRSlXPO0AhYWw=
|
||||
github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4=
|
||||
github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
|
||||
github.com/tj/assert v0.0.0-20171129193455-018094318fb0/go.mod h1:mZ9/Rh9oLWpLLDRpvE+3b7gP/C2YyLFYxNmcLnPTMe0=
|
||||
@@ -1058,12 +1120,15 @@ golang.org/x/crypto v0.0.0-20200128174031-69ecbb4d6d5d/go.mod h1:LzIPMQfyMNhhGPh
|
||||
golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975 h1:/Tl7pH94bvbAAHBdZJT947M/+gp0+CqQXDtMRC0fseo=
|
||||
golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20200414173820-0848c9571904/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20201112155050-0c6587e931a9 h1:umElSU9WZirRdgu2yFHY0ayQkEnKiOC1TtM3fWXFnoU=
|
||||
golang.org/x/crypto v0.0.0-20201112155050-0c6587e931a9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20201117144127-c1f2f97bffc9 h1:phUcVbl53swtrUN8kQEXFhUxPlIlWyBfKmidCu7P95o=
|
||||
golang.org/x/crypto v0.0.0-20201117144127-c1f2f97bffc9/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
@@ -1195,6 +1260,7 @@ golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7w
|
||||
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
@@ -1219,6 +1285,8 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f h1:+Nyd8tzPX9R7BWHguqsrbFdRx
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201113135734-0a15ea8d9b02 h1:5Ftd3YbC/kANXWCBjvppvUmv1BMakgFcBKA7MpYYp4M=
|
||||
golang.org/x/sys v0.0.0-20201113135734-0a15ea8d9b02/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221 h1:/ZHdbVpdR/jk3g30/d4yUL0JU9kksj8+F/bnQUVLGDM=
|
||||
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
||||
golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
@@ -1349,6 +1417,7 @@ google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEY
|
||||
google.golang.org/genproto v0.0.0-20200527145253-8367513e4ece h1:1YM0uhfumvoDu9sx8+RyWwTI63zoCQvI23IYFRlvte0=
|
||||
google.golang.org/genproto v0.0.0-20200527145253-8367513e4ece/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
|
||||
google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
|
||||
google.golang.org/grpc v1.0.5/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
|
||||
google.golang.org/grpc v1.15.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio=
|
||||
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
@@ -1381,6 +1450,7 @@ google.golang.org/protobuf v1.24.0 h1:UhZDfRO8JRQru4/+LlLE0BRKGF8L+PICnvYZmx/fEG
|
||||
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
|
||||
gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/cenkalti/backoff.v2 v2.2.1/go.mod h1:S0QdOvT2AlerfSBkp0O+dk+bbIMaNbEmVk876gPCthU=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
@@ -1402,6 +1472,7 @@ gopkg.in/ini.v1 v1.51.0 h1:AQvPpx3LzTDM0AjnIRlVFwFFGC+npRopjZxLJj6gdno=
|
||||
gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
|
||||
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
|
||||
gopkg.in/rethinkdb/rethinkdb-go.v6 v6.2.1/go.mod h1:WbjuEoo1oadwzQ4apSDU+JTvmllEHtsNHS6y7vFc7iw=
|
||||
gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||
247 pkg/compiler/backend.go Normal file
@@ -0,0 +1,247 @@
package compiler

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"strings"

	artifact "github.com/mudler/luet/pkg/compiler/types/artifact"

	"github.com/mudler/luet/pkg/compiler/backend"
	"github.com/mudler/luet/pkg/config"
	"github.com/pkg/errors"

	. "github.com/mudler/luet/pkg/logger"
)

func NewBackend(s string) (CompilerBackend, error) {
	var compilerBackend CompilerBackend

	switch s {
	case backend.ImgBackend:
		compilerBackend = backend.NewSimpleImgBackend()
	case backend.DockerBackend:
		compilerBackend = backend.NewSimpleDockerBackend()
	default:
		return nil, errors.New("invalid backend. Unsupported")
	}

	return compilerBackend, nil
}

type CompilerBackend interface {
	BuildImage(backend.Options) error
	ExportImage(backend.Options) error
	RemoveImage(backend.Options) error
	ImageDefinitionToTar(backend.Options) error
	ExtractRootfs(opts backend.Options, keepPerms bool) error

	CopyImage(string, string) error
	DownloadImage(opts backend.Options) error

	Push(opts backend.Options) error
	ImageAvailable(string) bool

	ImageExists(string) bool
}

// GenerateChanges generates changes between two images using a backend by leveraging export/extractrootfs methods
// example of json return: [
//   {
//     "Image1": "luet/base",
//     "Image2": "alpine",
//     "DiffType": "File",
//     "Diff": {
//       "Adds": null,
//       "Dels": [
//         {
//           "Name": "/luetbuild",
//           "Size": 5830706
//         },
//         {
//           "Name": "/luetbuild/Dockerfile",
//           "Size": 50
//         },
//         {
//           "Name": "/luetbuild/output1",
//           "Size": 5830656
//         }
//       ],
//       "Mods": null
//     }
//   }
// ]
func GenerateChanges(b CompilerBackend, fromImage, toImage backend.Options) ([]artifact.ArtifactLayer, error) {

	res := artifact.ArtifactLayer{FromImage: fromImage.ImageName, ToImage: toImage.ImageName}

	tmpdiffs, err := config.LuetCfg.GetSystem().TempDir("extraction")
	if err != nil {
		return []artifact.ArtifactLayer{}, errors.Wrap(err, "Error met while creating tempdir for rootfs")
	}
	defer os.RemoveAll(tmpdiffs) // clean up

	srcRootFS, err := ioutil.TempDir(tmpdiffs, "src")
	if err != nil {
		return []artifact.ArtifactLayer{}, errors.Wrap(err, "Error met while creating tempdir for rootfs")
	}
	defer os.RemoveAll(srcRootFS) // clean up

	dstRootFS, err := ioutil.TempDir(tmpdiffs, "dst")
	if err != nil {
		return []artifact.ArtifactLayer{}, errors.Wrap(err, "Error met while creating tempdir for rootfs")
	}
	defer os.RemoveAll(dstRootFS) // clean up

	srcImageExtract := backend.Options{
		ImageName:   fromImage.ImageName,
		Destination: srcRootFS,
	}
	Debug("Extracting source image", fromImage.ImageName)
	err = b.ExtractRootfs(srcImageExtract, false) // No need to keep permissions as we just collect file diffs
	if err != nil {
		return []artifact.ArtifactLayer{}, errors.Wrap(err, "Error met while unpacking src image "+fromImage.ImageName)
	}

	dstImageExtract := backend.Options{
		ImageName:   toImage.ImageName,
		Destination: dstRootFS,
	}
	Debug("Extracting destination image", toImage.ImageName)
	err = b.ExtractRootfs(dstImageExtract, false)
	if err != nil {
		return []artifact.ArtifactLayer{}, errors.Wrap(err, "Error met while unpacking dst image "+toImage.ImageName)
	}

	// Get Additions/Changes. dst -> src
	err = filepath.Walk(dstRootFS, func(path string, info os.FileInfo, err error) error {
		if info.IsDir() {
			return nil
		}

		realpath := strings.Replace(path, dstRootFS, "", -1)
		fileInfo, err := os.Lstat(filepath.Join(srcRootFS, realpath))
		if err == nil {
			var sizeA, sizeB int64
			sizeA = fileInfo.Size()

			if s, err := os.Lstat(filepath.Join(dstRootFS, realpath)); err == nil {
				sizeB = s.Size()
			}

			if sizeA != sizeB {
				// fmt.Println("File changed", path, filepath.Join(srcRootFS, realpath))
				res.Diffs.Changes = append(res.Diffs.Changes, artifact.ArtifactNode{
					Name: filepath.Join("/", realpath),
					Size: int(sizeB),
				})
			} else {
				// fmt.Println("File already exists", path, filepath.Join(srcRootFS, realpath))
			}
		} else {
			var sizeB int64

			if s, err := os.Lstat(filepath.Join(dstRootFS, realpath)); err == nil {
				sizeB = s.Size()
			}
			res.Diffs.Additions = append(res.Diffs.Additions, artifact.ArtifactNode{
				Name: filepath.Join("/", realpath),
				Size: int(sizeB),
			})

			// fmt.Println("File created", path, filepath.Join(srcRootFS, realpath))
		}

		return nil
	})
	if err != nil {
		return []artifact.ArtifactLayer{}, errors.Wrap(err, "Error met while walking image destination")
	}

	// Get deletions. src -> dst
	err = filepath.Walk(srcRootFS, func(path string, info os.FileInfo, err error) error {
		if info.IsDir() {
			return nil
		}

		realpath := strings.Replace(path, srcRootFS, "", -1)
		if _, err = os.Lstat(filepath.Join(dstRootFS, realpath)); err != nil {
			// fmt.Println("File deleted", path, filepath.Join(srcRootFS, realpath))
			res.Diffs.Deletions = append(res.Diffs.Deletions, artifact.ArtifactNode{
				Name: filepath.Join("/", realpath),
			})
		}

		return nil
	})
	if err != nil {
		return []artifact.ArtifactLayer{}, errors.Wrap(err, "Error met while walking image source")
	}

	diffs := []artifact.ArtifactLayer{res}

	if config.LuetCfg.GetGeneral().Debug {
		summary := ComputeArtifactLayerSummary(diffs)
		for _, l := range summary.Layers {
			Debug(fmt.Sprintf("Diff %s -> %s: add %d (%d bytes), del %d (%d bytes), change %d (%d bytes)",
				l.FromImage, l.ToImage,
				l.AddFiles, l.AddSizes,
				l.DelFiles, l.DelSizes,
				l.ChangeFiles, l.ChangeSizes))
		}
	}

	return diffs, nil
}

type ArtifactLayerSummary struct {
	FromImage   string `json:"image1"`
	ToImage     string `json:"image2"`
	AddFiles    int    `json:"add_files"`
	AddSizes    int64  `json:"add_sizes"`
	DelFiles    int    `json:"del_files"`
	DelSizes    int64  `json:"del_sizes"`
	ChangeFiles int    `json:"change_files"`
	ChangeSizes int64  `json:"change_sizes"`
}

type ArtifactLayersSummary struct {
	Layers []ArtifactLayerSummary `json:"summary"`
}

func ComputeArtifactLayerSummary(diffs []artifact.ArtifactLayer) ArtifactLayersSummary {

	ans := ArtifactLayersSummary{
		Layers: make([]ArtifactLayerSummary, 0),
	}

	for _, layer := range diffs {
		sum := ArtifactLayerSummary{
			FromImage:   layer.FromImage,
			ToImage:     layer.ToImage,
			AddFiles:    0,
			AddSizes:    0,
			DelFiles:    0,
			DelSizes:    0,
			ChangeFiles: 0,
			ChangeSizes: 0,
		}
		for _, a := range layer.Diffs.Additions {
			sum.AddFiles++
			sum.AddSizes += int64(a.Size)
		}
		for _, d := range layer.Diffs.Deletions {
			sum.DelFiles++
			sum.DelSizes += int64(d.Size)
		}
		for _, c := range layer.Diffs.Changes {
			sum.ChangeFiles++
			sum.ChangeSizes += int64(c.Size)
		}
		ans.Layers = append(ans.Layers, sum)
	}

	return ans
}
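Aside (illustrative, not part of the diff): a minimal sketch of how the new compiler-level backend factory and diff helper above could be driven by a caller. The image names are placeholders and error handling is reduced to the essentials.

package main

import (
	"fmt"

	"github.com/mudler/luet/pkg/compiler"
	"github.com/mudler/luet/pkg/compiler/backend"
)

func main() {
	// Pick a concrete backend by name; an unsupported name returns an error.
	b, err := compiler.NewBackend(backend.DockerBackend)
	if err != nil {
		panic(err)
	}

	// Diff two images by extracting both rootfs trees and walking them.
	layers, err := compiler.GenerateChanges(b,
		backend.Options{ImageName: "luet/base"}, // placeholder source image
		backend.Options{ImageName: "alpine"},    // placeholder target image
	)
	if err != nil {
		panic(err)
	}
	for _, l := range layers {
		fmt.Println(l.FromImage, "->", l.ToImage, "additions:", len(l.Diffs.Additions))
	}
}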
@@ -16,8 +16,13 @@
package backend

import (
	"os/exec"

	"github.com/mudler/luet/pkg/config"
	. "github.com/mudler/luet/pkg/logger"

	"github.com/google/go-containerregistry/pkg/crane"
	"github.com/mudler/luet/pkg/compiler"
	"github.com/pkg/errors"
)

const (
@@ -30,14 +35,48 @@ func imageAvailable(image string) bool {
	return err == nil
}

func NewBackend(s string) compiler.CompilerBackend {
	var compilerBackend compiler.CompilerBackend

	switch s {
	case ImgBackend:
		compilerBackend = NewSimpleImgBackend()
	case DockerBackend:
		compilerBackend = NewSimpleDockerBackend()
	}
	return compilerBackend
type Options struct {
	ImageName      string
	SourcePath     string
	DockerFileName string
	Destination    string
	Context        string
	BackendArgs    []string
}

func runCommand(cmd *exec.Cmd) error {
	output := ""
	buffered := !config.LuetCfg.GetGeneral().ShowBuildOutput
	writer := NewBackendWriter(buffered)

	cmd.Stdout = writer
	cmd.Stderr = writer

	if buffered {
		Spinner(22)
		defer SpinnerStop()
	}

	err := cmd.Start()
	if err != nil {
		return errors.Wrap(err, "Failed starting command")
	}

	err = cmd.Wait()
	if err != nil {
		output = writer.GetCombinedOutput()
		return errors.Wrapf(err, "Failed running command: %s", output)
	}

	return nil
}

func genBuildCommand(opts Options) []string {
	context := opts.Context

	if context == "" {
		context = "."
	}
	buildarg := append(opts.BackendArgs, "-f", opts.DockerFileName, "-t", opts.ImageName, context)
	return append([]string{"build"}, buildarg...)
}
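Aside (illustrative, not part of the diff): within the backend package, an Options value like the one below would make genBuildCommand return the argument vector ["build", "--build-arg", "FOO=bar", "-f", "Dockerfile", "-t", "test/image", "."], which the docker and img backends then hand to exec.Command with their respective binaries. The image name and build arg are placeholders.

opts := Options{
	ImageName:      "test/image",
	DockerFileName: "Dockerfile",
	BackendArgs:    []string{"--build-arg", "FOO=bar"},
}
args := genBuildCommand(opts) // Context defaults to "." when empty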
@@ -1,204 +0,0 @@
|
||||
// Copyright © 2020 Ettore Di Giacinto <mudler@gentoo.org>
|
||||
//
|
||||
// This program is free software; you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation; either version 2 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program; if not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package backend
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/mudler/luet/pkg/compiler"
|
||||
"github.com/mudler/luet/pkg/config"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// GenerateChanges generates changes between two images using a backend by leveraging export/extractrootfs methods
|
||||
// example of json return: [
|
||||
// {
|
||||
// "Image1": "luet/base",
|
||||
// "Image2": "alpine",
|
||||
// "DiffType": "File",
|
||||
// "Diff": {
|
||||
// "Adds": null,
|
||||
// "Dels": [
|
||||
// {
|
||||
// "Name": "/luetbuild",
|
||||
// "Size": 5830706
|
||||
// },
|
||||
// {
|
||||
// "Name": "/luetbuild/Dockerfile",
|
||||
// "Size": 50
|
||||
// },
|
||||
// {
|
||||
// "Name": "/luetbuild/output1",
|
||||
// "Size": 5830656
|
||||
// }
|
||||
// ],
|
||||
// "Mods": null
|
||||
// }
|
||||
// }
|
||||
// ]
|
||||
func GenerateChanges(b compiler.CompilerBackend, fromImage, toImage compiler.CompilerBackendOptions) ([]compiler.ArtifactLayer, error) {
|
||||
|
||||
srcImage := fromImage.Destination
|
||||
dstImage := toImage.Destination
|
||||
|
||||
res := compiler.ArtifactLayer{FromImage: srcImage, ToImage: dstImage}
|
||||
|
||||
tmpdiffs, err := config.LuetCfg.GetSystem().TempDir("extraction")
|
||||
if err != nil {
|
||||
return []compiler.ArtifactLayer{}, errors.Wrap(err, "Error met while creating tempdir for rootfs")
|
||||
}
|
||||
defer os.RemoveAll(tmpdiffs) // clean up
|
||||
|
||||
srcRootFS, err := ioutil.TempDir(tmpdiffs, "src")
|
||||
if err != nil {
|
||||
return []compiler.ArtifactLayer{}, errors.Wrap(err, "Error met while creating tempdir for rootfs")
|
||||
}
|
||||
defer os.RemoveAll(srcRootFS) // clean up
|
||||
|
||||
dstRootFS, err := ioutil.TempDir(tmpdiffs, "dst")
|
||||
if err != nil {
|
||||
return []compiler.ArtifactLayer{}, errors.Wrap(err, "Error met while creating tempdir for rootfs")
|
||||
}
|
||||
defer os.RemoveAll(dstRootFS) // clean up
|
||||
|
||||
// Handle both files (.tar) or images. If parameters are beginning with / , don't export the images
|
||||
if !strings.HasPrefix(srcImage, "/") {
|
||||
srcImageTar, err := ioutil.TempFile(tmpdiffs, "srctar")
|
||||
if err != nil {
|
||||
return []compiler.ArtifactLayer{}, errors.Wrap(err, "Error met while creating tempdir for rootfs")
|
||||
}
|
||||
|
||||
defer os.Remove(srcImageTar.Name()) // clean up
|
||||
srcImageExport := compiler.CompilerBackendOptions{
|
||||
ImageName: srcImage,
|
||||
Destination: srcImageTar.Name(),
|
||||
}
|
||||
err = b.ExportImage(srcImageExport)
|
||||
if err != nil {
|
||||
return []compiler.ArtifactLayer{}, errors.Wrap(err, "Error met while exporting src image "+srcImage)
|
||||
}
|
||||
srcImage = srcImageTar.Name()
|
||||
}
|
||||
|
||||
srcImageExtract := compiler.CompilerBackendOptions{
|
||||
SourcePath: srcImage,
|
||||
ImageName: fromImage.ImageName,
|
||||
Destination: srcRootFS,
|
||||
}
|
||||
err = b.ExtractRootfs(srcImageExtract, false) // No need to keep permissions as we just collect file diffs
|
||||
if err != nil {
|
||||
return []compiler.ArtifactLayer{}, errors.Wrap(err, "Error met while unpacking src image "+srcImage)
|
||||
}
|
||||
|
||||
// Handle both files (.tar) or images. If parameters are beginning with / , don't export the images
|
||||
if !strings.HasPrefix(dstImage, "/") {
|
||||
dstImageTar, err := ioutil.TempFile(tmpdiffs, "dsttar")
|
||||
if err != nil {
|
||||
return []compiler.ArtifactLayer{}, errors.Wrap(err, "Error met while creating tempdir for rootfs")
|
||||
}
|
||||
|
||||
defer os.Remove(dstImageTar.Name()) // clean up
|
||||
dstImageExport := compiler.CompilerBackendOptions{
|
||||
ImageName: dstImage,
|
||||
Destination: dstImageTar.Name(),
|
||||
}
|
||||
err = b.ExportImage(dstImageExport)
|
||||
if err != nil {
|
||||
return []compiler.ArtifactLayer{}, errors.Wrap(err, "Error met while exporting dst image "+dstImage)
|
||||
}
|
||||
dstImage = dstImageTar.Name()
|
||||
}
|
||||
|
||||
dstImageExtract := compiler.CompilerBackendOptions{
|
||||
SourcePath: dstImage,
|
||||
ImageName: toImage.ImageName,
|
||||
Destination: dstRootFS,
|
||||
}
|
||||
err = b.ExtractRootfs(dstImageExtract, false)
|
||||
if err != nil {
|
||||
return []compiler.ArtifactLayer{}, errors.Wrap(err, "Error met while unpacking dst image "+dstImage)
|
||||
}
|
||||
|
||||
// Get Additions/Changes. dst -> src
|
||||
err = filepath.Walk(dstRootFS, func(path string, info os.FileInfo, err error) error {
|
||||
if info.IsDir() {
|
||||
return nil
|
||||
}
|
||||
|
||||
realpath := strings.Replace(path, dstRootFS, "", -1)
|
||||
fileInfo, err := os.Lstat(filepath.Join(srcRootFS, realpath))
|
||||
if err == nil {
|
||||
var sizeA, sizeB int64
|
||||
sizeA = fileInfo.Size()
|
||||
|
||||
if s, err := os.Lstat(filepath.Join(dstRootFS, realpath)); err == nil {
|
||||
sizeB = s.Size()
|
||||
}
|
||||
|
||||
if sizeA != sizeB {
|
||||
// fmt.Println("File changed", path, filepath.Join(srcRootFS, realpath))
|
||||
res.Diffs.Changes = append(res.Diffs.Changes, compiler.ArtifactNode{
|
||||
Name: filepath.Join("/", realpath),
|
||||
Size: int(sizeB),
|
||||
})
|
||||
} else {
|
||||
// fmt.Println("File already exists", path, filepath.Join(srcRootFS, realpath))
|
||||
}
|
||||
} else {
|
||||
var sizeB int64
|
||||
|
||||
if s, err := os.Lstat(filepath.Join(dstRootFS, realpath)); err == nil {
|
||||
sizeB = s.Size()
|
||||
}
|
||||
res.Diffs.Additions = append(res.Diffs.Additions, compiler.ArtifactNode{
|
||||
Name: filepath.Join("/", realpath),
|
||||
Size: int(sizeB),
|
||||
})
|
||||
|
||||
// fmt.Println("File created", path, filepath.Join(srcRootFS, realpath))
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return []compiler.ArtifactLayer{}, errors.Wrap(err, "Error met while walking image destination")
|
||||
}
|
||||
|
||||
// Get deletions. src -> dst
|
||||
err = filepath.Walk(srcRootFS, func(path string, info os.FileInfo, err error) error {
|
||||
if info.IsDir() {
|
||||
return nil
|
||||
}
|
||||
|
||||
realpath := strings.Replace(path, srcRootFS, "", -1)
|
||||
if _, err = os.Lstat(filepath.Join(dstRootFS, realpath)); err != nil {
|
||||
// fmt.Println("File deleted", path, filepath.Join(srcRootFS, realpath))
|
||||
res.Diffs.Deletions = append(res.Diffs.Deletions, compiler.ArtifactNode{
|
||||
Name: filepath.Join("/", realpath),
|
||||
})
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return []compiler.ArtifactLayer{}, errors.Wrap(err, "Error met while walking image source")
|
||||
}
|
||||
|
||||
return []compiler.ArtifactLayer{res}, nil
|
||||
}
|
@@ -17,18 +17,17 @@ package backend
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
bus "github.com/mudler/luet/pkg/bus"
|
||||
|
||||
docker "github.com/fsouza/go-dockerclient"
|
||||
capi "github.com/mudler/docker-companion/api"
|
||||
|
||||
"github.com/mudler/luet/pkg/compiler"
|
||||
"github.com/mudler/luet/pkg/config"
|
||||
"github.com/mudler/luet/pkg/helpers"
|
||||
. "github.com/mudler/luet/pkg/logger"
|
||||
|
||||
@@ -37,35 +36,32 @@ import (
|
||||
|
||||
type SimpleDocker struct{}
|
||||
|
||||
func NewSimpleDockerBackend() compiler.CompilerBackend {
|
||||
func NewSimpleDockerBackend() *SimpleDocker {
|
||||
return &SimpleDocker{}
|
||||
}
|
||||
|
||||
// TODO: Missing still: labels, and build args expansion
|
||||
func (*SimpleDocker) BuildImage(opts compiler.CompilerBackendOptions) error {
|
||||
func (*SimpleDocker) BuildImage(opts Options) error {
|
||||
name := opts.ImageName
|
||||
path := opts.SourcePath
|
||||
dockerfileName := opts.DockerFileName
|
||||
context := opts.Context
|
||||
bus.Manager.Publish(bus.EventImagePreBuild, opts)
|
||||
|
||||
if context == "" {
|
||||
context = "."
|
||||
}
|
||||
buildarg := []string{"build", "-f", dockerfileName, "-t", name, context}
|
||||
|
||||
Debug(":whale2: Building image " + name)
|
||||
buildarg := genBuildCommand(opts)
|
||||
Info(":whale2: Building image " + name)
|
||||
cmd := exec.Command("docker", buildarg...)
|
||||
cmd.Dir = path
|
||||
out, err := cmd.CombinedOutput()
|
||||
cmd.Dir = opts.SourcePath
|
||||
err := runCommand(cmd)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Failed building image: "+string(out))
|
||||
return err
|
||||
}
|
||||
|
||||
Info(":whale: Building image " + name + " done")
|
||||
|
||||
if os.Getenv("DOCKER_SQUASH") == "true" {
|
||||
Info(":whale: Squashing image " + name)
|
||||
var client *docker.Client
|
||||
|
||||
Spinner(22)
|
||||
defer SpinnerStop()
|
||||
client, err = docker.NewClientFromEnv()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not connect to the Docker daemon")
|
||||
@@ -74,14 +70,11 @@ func (*SimpleDocker) BuildImage(opts compiler.CompilerBackendOptions) error {
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Failed squashing image")
|
||||
}
|
||||
|
||||
Info(":whale: Squashing image " + name + " done")
|
||||
}
|
||||
|
||||
if config.LuetCfg.GetGeneral().ShowBuildOutput {
|
||||
Info(string(out))
|
||||
} else {
|
||||
Debug(string(out))
|
||||
}
|
||||
bus.Manager.Publish(bus.EventImagePostBuild, opts)
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -97,16 +90,25 @@ func (*SimpleDocker) CopyImage(src, dst string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (*SimpleDocker) DownloadImage(opts compiler.CompilerBackendOptions) error {
|
||||
func (*SimpleDocker) DownloadImage(opts Options) error {
|
||||
name := opts.ImageName
|
||||
bus.Manager.Publish(bus.EventImagePrePull, opts)
|
||||
|
||||
buildarg := []string{"pull", name}
|
||||
Debug(":whale: Downloading image " + name)
|
||||
|
||||
Spinner(22)
|
||||
defer SpinnerStop()
|
||||
|
||||
cmd := exec.Command("docker", buildarg...)
|
||||
out, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Failed pulling image: "+string(out))
|
||||
}
|
||||
|
||||
Info(":whale: Downloaded image:", name)
|
||||
bus.Manager.Publish(bus.EventImagePostPull, opts)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -127,7 +129,7 @@ func (*SimpleDocker) ImageAvailable(imagename string) bool {
|
||||
return imageAvailable(imagename)
|
||||
}
|
||||
|
||||
func (*SimpleDocker) RemoveImage(opts compiler.CompilerBackendOptions) error {
|
||||
func (*SimpleDocker) RemoveImage(opts Options) error {
|
||||
name := opts.ImageName
|
||||
buildarg := []string{"rmi", name}
|
||||
out, err := exec.Command("docker", buildarg...).CombinedOutput()
|
||||
@@ -139,19 +141,26 @@ func (*SimpleDocker) RemoveImage(opts compiler.CompilerBackendOptions) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (*SimpleDocker) Push(opts compiler.CompilerBackendOptions) error {
|
||||
func (*SimpleDocker) Push(opts Options) error {
|
||||
name := opts.ImageName
|
||||
pusharg := []string{"push", name}
|
||||
bus.Manager.Publish(bus.EventImagePrePush, opts)
|
||||
|
||||
Spinner(22)
|
||||
defer SpinnerStop()
|
||||
|
||||
out, err := exec.Command("docker", pusharg...).CombinedOutput()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Failed pushing image: "+string(out))
|
||||
}
|
||||
Info(":whale: Pushed image:", name)
|
||||
bus.Manager.Publish(bus.EventImagePostPush, opts)
|
||||
|
||||
//Info(string(out))
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *SimpleDocker) ImageDefinitionToTar(opts compiler.CompilerBackendOptions) error {
|
||||
func (s *SimpleDocker) ImageDefinitionToTar(opts Options) error {
|
||||
if err := s.BuildImage(opts); err != nil {
|
||||
return errors.Wrap(err, "Failed building image")
|
||||
}
|
||||
@@ -164,18 +173,22 @@ func (s *SimpleDocker) ImageDefinitionToTar(opts compiler.CompilerBackendOptions
|
||||
return nil
|
||||
}
|
||||
|
||||
func (*SimpleDocker) ExportImage(opts compiler.CompilerBackendOptions) error {
|
||||
func (*SimpleDocker) ExportImage(opts Options) error {
|
||||
name := opts.ImageName
|
||||
path := opts.Destination
|
||||
|
||||
buildarg := []string{"save", name, "-o", path}
|
||||
Debug(":whale: Saving image " + name)
|
||||
|
||||
Spinner(22)
|
||||
defer SpinnerStop()
|
||||
|
||||
out, err := exec.Command("docker", buildarg...).CombinedOutput()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Failed exporting image: "+string(out))
|
||||
}
|
||||
|
||||
Info(":whale: Exported image:", name)
|
||||
Debug(":whale: Exported image:", name)
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -183,10 +196,29 @@ type ManifestEntry struct {
|
||||
Layers []string `json:"Layers"`
|
||||
}
|
||||
|
||||
func (b *SimpleDocker) ExtractRootfs(opts compiler.CompilerBackendOptions, keepPerms bool) error {
|
||||
src := opts.SourcePath
|
||||
func (b *SimpleDocker) ExtractRootfs(opts Options, keepPerms bool) error {
|
||||
name := opts.ImageName
|
||||
dst := opts.Destination
|
||||
|
||||
tempexport, err := ioutil.TempDir(dst, "tmprootfs")
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Error met while creating tempdir for rootfs")
|
||||
}
|
||||
defer os.RemoveAll(tempexport) // clean up
|
||||
|
||||
imageExport := filepath.Join(tempexport, "image.tar")
|
||||
|
||||
Spinner(22)
|
||||
defer SpinnerStop()
|
||||
|
||||
if err := b.ExportImage(Options{ImageName: name, Destination: imageExport}); err != nil {
|
||||
return errors.Wrap(err, "failed while extracting rootfs for "+name)
|
||||
}
|
||||
|
||||
SpinnerStop()
|
||||
|
||||
src := imageExport
|
||||
|
||||
if src == "" && opts.ImageName != "" {
|
||||
tempUnpack, err := ioutil.TempDir(dst, "tempUnpack")
|
||||
if err != nil {
|
||||
@@ -194,7 +226,7 @@ func (b *SimpleDocker) ExtractRootfs(opts compiler.CompilerBackendOptions, keepP
|
||||
}
|
||||
defer os.RemoveAll(tempUnpack) // clean up
|
||||
imageExport := filepath.Join(tempUnpack, "image.tar")
|
||||
if err := b.ExportImage(compiler.CompilerBackendOptions{ImageName: opts.ImageName, Destination: imageExport}); err != nil {
|
||||
if err := b.ExportImage(Options{ImageName: opts.ImageName, Destination: imageExport}); err != nil {
|
||||
return errors.Wrap(err, "while exporting image before extraction")
|
||||
}
|
||||
src = imageExport
|
||||
@@ -246,7 +278,7 @@ func (b *SimpleDocker) ExtractRootfs(opts compiler.CompilerBackendOptions, keepP
|
||||
return err
|
||||
}
|
||||
|
||||
err = export.UnPackLayers(layers_sha, dst, "")
|
||||
err = export.UnPackLayers(layers_sha, dst, "containerd")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -258,21 +290,3 @@ func (b *SimpleDocker) ExtractRootfs(opts compiler.CompilerBackendOptions, keepP
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Changes retrieves changes between image layers
|
||||
func (d *SimpleDocker) Changes(fromImage, toImage compiler.CompilerBackendOptions) ([]compiler.ArtifactLayer, error) {
|
||||
diffs, err := GenerateChanges(d, fromImage, toImage)
|
||||
|
||||
if config.LuetCfg.GetGeneral().Debug {
|
||||
summary := compiler.ComputeArtifactLayerSummary(diffs)
|
||||
for _, l := range summary.Layers {
|
||||
Debug(fmt.Sprintf("Diff %s -> %s: add %d (%d bytes), del %d (%d bytes), change %d (%d bytes)",
|
||||
l.FromImage, l.ToImage,
|
||||
l.AddFiles, l.AddSizes,
|
||||
l.DelFiles, l.DelSizes,
|
||||
l.ChangeFiles, l.ChangeSizes))
|
||||
}
|
||||
}
|
||||
|
||||
return diffs, err
|
||||
}
|
||||
|
@@ -16,9 +16,11 @@
|
||||
package backend_test
|
||||
|
||||
import (
|
||||
"github.com/mudler/luet/pkg/compiler"
|
||||
. "github.com/mudler/luet/pkg/compiler"
|
||||
"github.com/mudler/luet/pkg/compiler/backend"
|
||||
. "github.com/mudler/luet/pkg/compiler/backend"
|
||||
"github.com/mudler/luet/pkg/solver"
|
||||
"github.com/mudler/luet/pkg/compiler/types/artifact"
|
||||
|
||||
"io/ioutil"
|
||||
"os"
|
||||
@@ -41,13 +43,10 @@ var _ = Describe("Docker backend", func() {
|
||||
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(1))
|
||||
|
||||
compiler := NewLuetCompiler(nil, generalRecipe.GetDatabase(), NewDefaultCompilerOptions(), solver.Options{Type: solver.SingleCoreSimple})
|
||||
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "enman", Category: "app-admin", Version: "1.4.0"})
|
||||
cc := NewLuetCompiler(nil, generalRecipe.GetDatabase())
|
||||
lspec, err := cc.FromPackage(&pkg.DefaultPackage{Name: "enman", Category: "app-admin", Version: "1.4.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
lspec, ok := spec.(*LuetCompilationSpec)
|
||||
Expect(ok).To(BeTrue())
|
||||
|
||||
Expect(lspec.Steps).To(Equal([]string{"echo foo > /test", "echo bar > /test2"}))
|
||||
Expect(lspec.Image).To(Equal("luet/base"))
|
||||
Expect(lspec.Seed).To(Equal("alpine"))
|
||||
@@ -71,15 +70,16 @@ ENV PACKAGE_NAME=enman
|
||||
ENV PACKAGE_VERSION=1.4.0
|
||||
ENV PACKAGE_CATEGORY=app-admin`))
|
||||
b := NewSimpleDockerBackend()
|
||||
opts := CompilerBackendOptions{
|
||||
opts := backend.Options{
|
||||
ImageName: "luet/base",
|
||||
SourcePath: tmpdir,
|
||||
DockerFileName: "Dockerfile",
|
||||
Destination: filepath.Join(tmpdir2, "output1.tar"),
|
||||
}
|
||||
Expect(b.ImageDefinitionToTar(opts)).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(filepath.Join(tmpdir2, "output1.tar"))).To(BeTrue())
|
||||
|
||||
Expect(b.BuildImage(opts)).ToNot(HaveOccurred())
|
||||
Expect(b.ExportImage(opts)).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(filepath.Join(tmpdir2, "output1.tar"))).To(BeTrue())
|
||||
|
||||
err = lspec.WriteStepImageDefinition(lspec.Image, filepath.Join(tmpdir, "LuetDockerfile"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
@@ -94,34 +94,46 @@ ENV PACKAGE_VERSION=1.4.0
|
||||
ENV PACKAGE_CATEGORY=app-admin
|
||||
RUN echo foo > /test
|
||||
RUN echo bar > /test2`))
|
||||
opts2 := CompilerBackendOptions{
|
||||
opts2 := backend.Options{
|
||||
ImageName: "test",
|
||||
SourcePath: tmpdir,
|
||||
DockerFileName: "LuetDockerfile",
|
||||
Destination: filepath.Join(tmpdir, "output2.tar"),
|
||||
}
|
||||
Expect(b.ImageDefinitionToTar(opts2)).ToNot(HaveOccurred())
|
||||
|
||||
Expect(b.BuildImage(opts2)).ToNot(HaveOccurred())
|
||||
Expect(b.ExportImage(opts2)).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(filepath.Join(tmpdir, "output2.tar"))).To(BeTrue())
|
||||
|
||||
artifacts := []ArtifactNode{{
|
||||
artifacts := []artifact.ArtifactNode{{
|
||||
Name: "/luetbuild/LuetDockerfile",
|
||||
Size: 175,
|
||||
}}
|
||||
if os.Getenv("DOCKER_BUILDKIT") == "1" {
|
||||
artifacts = append(artifacts, ArtifactNode{Name: "/etc/resolv.conf", Size: 0})
|
||||
artifacts = append(artifacts, artifact.ArtifactNode{Name: "/etc/resolv.conf", Size: 0})
|
||||
}
|
||||
artifacts = append(artifacts, ArtifactNode{Name: "/test", Size: 4})
|
||||
artifacts = append(artifacts, ArtifactNode{Name: "/test2", Size: 4})
|
||||
artifacts = append(artifacts, artifact.ArtifactNode{Name: "/test", Size: 4})
|
||||
artifacts = append(artifacts, artifact.ArtifactNode{Name: "/test2", Size: 4})
|
||||
|
||||
Expect(b.Changes(opts, opts2)).To(Equal(
|
||||
[]ArtifactLayer{{
|
||||
FromImage: filepath.Join(tmpdir2, "output1.tar"),
|
||||
ToImage: filepath.Join(tmpdir, "output2.tar"),
|
||||
Diffs: ArtifactDiffs{
|
||||
Expect(compiler.GenerateChanges(b, opts, opts2)).To(Equal(
|
||||
[]artifact.ArtifactLayer{{
|
||||
FromImage: "luet/base",
|
||||
ToImage: "test",
|
||||
Diffs: artifact.ArtifactDiffs{
|
||||
Additions: artifacts,
|
||||
},
|
||||
}}))
|
||||
|
||||
opts2 = backend.Options{
|
||||
ImageName: "test",
|
||||
SourcePath: tmpdir,
|
||||
DockerFileName: "LuetDockerfile",
|
||||
Destination: filepath.Join(tmpdir, "output3.tar"),
|
||||
}
|
||||
|
||||
Expect(b.ImageDefinitionToTar(opts2)).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(filepath.Join(tmpdir, "output3.tar"))).To(BeTrue())
|
||||
Expect(b.ImageExists(opts2.ImageName)).To(BeFalse())
|
||||
})
|
||||
|
||||
It("Detects available images", func() {
|
||||
|
@@ -18,8 +18,10 @@ package backend
|
||||
import (
|
||||
"os"
|
||||
"os/exec"
|
||||
"strings"
|
||||
|
||||
bus "github.com/mudler/luet/pkg/bus"
|
||||
|
||||
"github.com/mudler/luet/pkg/compiler"
|
||||
. "github.com/mudler/luet/pkg/logger"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
@@ -27,37 +29,33 @@ import (
|
||||
|
||||
type SimpleImg struct{}
|
||||
|
||||
func NewSimpleImgBackend() compiler.CompilerBackend {
|
||||
func NewSimpleImgBackend() *SimpleImg {
|
||||
return &SimpleImg{}
|
||||
}
|
||||
|
||||
// TODO: Missing still: labels, and build args expansion
|
||||
func (*SimpleImg) BuildImage(opts compiler.CompilerBackendOptions) error {
|
||||
func (*SimpleImg) BuildImage(opts Options) error {
|
||||
name := opts.ImageName
|
||||
path := opts.SourcePath
|
||||
context := opts.Context
|
||||
bus.Manager.Publish(bus.EventImagePreBuild, opts)
|
||||
|
||||
if context == "" {
|
||||
context = "."
|
||||
}
|
||||
dockerfileName := opts.DockerFileName
|
||||
buildarg := genBuildCommand(opts)
|
||||
|
||||
Info(":tea: Building image " + name)
|
||||
|
||||
buildarg := []string{"build", "-f", dockerfileName, "-t", name, context}
|
||||
Spinner(22)
|
||||
defer SpinnerStop()
|
||||
Debug(":tea: Building image " + name)
|
||||
cmd := exec.Command("img", buildarg...)
|
||||
cmd.Dir = path
|
||||
out, err := cmd.CombinedOutput()
|
||||
|
||||
cmd.Dir = opts.SourcePath
|
||||
err := runCommand(cmd)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Failed building image: "+string(out))
|
||||
return err
|
||||
}
|
||||
bus.Manager.Publish(bus.EventImagePostBuild, opts)
|
||||
|
||||
Info(":tea: Building image " + name + " done")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (*SimpleImg) RemoveImage(opts compiler.CompilerBackendOptions) error {
|
||||
func (*SimpleImg) RemoveImage(opts Options) error {
|
||||
name := opts.ImageName
|
||||
buildarg := []string{"rm", name}
|
||||
Spinner(22)
|
||||
@@ -71,12 +69,16 @@ func (*SimpleImg) RemoveImage(opts compiler.CompilerBackendOptions) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (*SimpleImg) DownloadImage(opts compiler.CompilerBackendOptions) error {
|
||||
|
||||
func (*SimpleImg) DownloadImage(opts Options) error {
|
||||
name := opts.ImageName
|
||||
buildarg := []string{"pull", name}
|
||||
bus.Manager.Publish(bus.EventImagePrePull, opts)
|
||||
|
||||
buildarg := []string{"pull", name}
|
||||
Debug(":tea: Downloading image " + name)
|
||||
|
||||
Spinner(22)
|
||||
defer SpinnerStop()
|
||||
|
||||
cmd := exec.Command("img", buildarg...)
|
||||
out, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
@@ -84,6 +86,7 @@ func (*SimpleImg) DownloadImage(opts compiler.CompilerBackendOptions) error {
|
||||
}
|
||||
|
||||
Info(":tea: Image " + name + " downloaded")
|
||||
bus.Manager.Publish(bus.EventImagePostPull, opts)
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -106,14 +109,20 @@ func (*SimpleImg) ImageAvailable(imagename string) bool {
|
||||
return imageAvailable(imagename)
|
||||
}
|
||||
|
||||
// ImageExists check if the given image is available locally
|
||||
func (*SimpleImg) ImageExists(imagename string) bool {
|
||||
// NOOP: not implemented
|
||||
// TODO: Since img doesn't have an inspect command,
|
||||
// we need to parse the ls output manually
|
||||
cmd := exec.Command("img", "ls")
|
||||
out, err := cmd.Output()
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
if strings.Contains(string(out), imagename) {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (s *SimpleImg) ImageDefinitionToTar(opts compiler.CompilerBackendOptions) error {
|
||||
func (s *SimpleImg) ImageDefinitionToTar(opts Options) error {
|
||||
if err := s.BuildImage(opts); err != nil {
|
||||
return errors.Wrap(err, "Failed building image")
|
||||
}
|
||||
@@ -126,11 +135,15 @@ func (s *SimpleImg) ImageDefinitionToTar(opts compiler.CompilerBackendOptions) e
|
||||
return nil
|
||||
}
|
||||
|
||||
func (*SimpleImg) ExportImage(opts compiler.CompilerBackendOptions) error {
|
||||
func (*SimpleImg) ExportImage(opts Options) error {
|
||||
name := opts.ImageName
|
||||
path := opts.Destination
|
||||
buildarg := []string{"save", "-o", path, name}
|
||||
Debug(":tea: Saving image " + name)
|
||||
|
||||
Spinner(22)
|
||||
defer SpinnerStop()
|
||||
|
||||
out, err := exec.Command("img", buildarg...).CombinedOutput()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Failed exporting image: "+string(out))
|
||||
@@ -140,35 +153,44 @@ func (*SimpleImg) ExportImage(opts compiler.CompilerBackendOptions) error {
|
||||
}
|
||||
|
||||
// ExtractRootfs extracts the docker image content inside the destination
|
||||
func (*SimpleImg) ExtractRootfs(opts compiler.CompilerBackendOptions, keepPerms bool) error {
|
||||
func (s *SimpleImg) ExtractRootfs(opts Options, keepPerms bool) error {
|
||||
name := opts.ImageName
|
||||
path := opts.Destination
|
||||
|
||||
if !s.ImageExists(name) {
|
||||
if err := s.DownloadImage(opts); err != nil {
|
||||
return errors.Wrap(err, "failed pulling image "+name+" during extraction")
|
||||
}
|
||||
}
|
||||
|
||||
os.RemoveAll(path)
|
||||
|
||||
buildarg := []string{"unpack", "-o", path, name}
|
||||
Debug(":tea: Extracting image " + name)
|
||||
|
||||
Spinner(22)
|
||||
defer SpinnerStop()
|
||||
|
||||
out, err := exec.Command("img", buildarg...).CombinedOutput()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Failed extracting image: "+string(out))
|
||||
}
|
||||
Info(":tea: Image " + name + " extracted")
|
||||
Debug(":tea: Image " + name + " extracted")
|
||||
return nil
|
||||
}
|
||||
|
||||
// TODO: Use container-diff (https://github.com/GoogleContainerTools/container-diff) for checking out layer diffs
|
||||
// Changes uses container-diff (https://github.com/GoogleContainerTools/container-diff) for retrieving out layer diffs
|
||||
func (i *SimpleImg) Changes(fromImage, toImage compiler.CompilerBackendOptions) ([]compiler.ArtifactLayer, error) {
|
||||
return GenerateChanges(i, fromImage, toImage)
|
||||
}
|
||||
|
||||
func (*SimpleImg) Push(opts compiler.CompilerBackendOptions) error {
|
||||
func (*SimpleImg) Push(opts Options) error {
|
||||
name := opts.ImageName
|
||||
bus.Manager.Publish(bus.EventImagePrePush, opts)
|
||||
|
||||
pusharg := []string{"push", name}
|
||||
out, err := exec.Command("img", pusharg...).CombinedOutput()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Failed pushing image: "+string(out))
|
||||
}
|
||||
Info(":tea: Pushed image:", name)
|
||||
bus.Manager.Publish(bus.EventImagePostPush, opts)
|
||||
|
||||
//Info(string(out))
|
||||
return nil
|
||||
}
|
||||
|
48 pkg/compiler/backend/writer.go Normal file
@@ -0,0 +1,48 @@
// Copyright © 2021 Daniele Rondina <geaaru@sabayonlinux.org>
//                  Ettore Di Giacinto <mudler@sabayonlinux.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.

package backend

import (
	"bytes"

	. "github.com/mudler/luet/pkg/logger"
)

type BackendWriter struct {
	BufferedOutput bool
	Buffer         *bytes.Buffer
}

func NewBackendWriter(buffered bool) *BackendWriter {
	return &BackendWriter{
		BufferedOutput: buffered,
		Buffer:         &bytes.Buffer{},
	}
}

func (b *BackendWriter) Write(p []byte) (int, error) {
	if b.BufferedOutput {
		return b.Buffer.Write(p)
	}

	Msg("info", false, false, (string(p)))

	return len(p), nil
}

func (b *BackendWriter) Close() error { return nil }
func (b *BackendWriter) GetCombinedOutput() string { return b.Buffer.String() }
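Aside (illustrative, not part of the diff): BackendWriter is what runCommand plugs into exec.Cmd so backend output is either streamed through the logger or buffered for error reporting. A minimal sketch, assuming "os/exec" and "fmt" are imported and an arbitrary command:

w := NewBackendWriter(true) // buffered: collect output instead of logging it line by line
cmd := exec.Command("docker", "version")
cmd.Stdout = w
cmd.Stderr = w
if err := cmd.Run(); err != nil {
	fmt.Println("combined output:", w.GetCombinedOutput())
}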
@@ -13,10 +13,10 @@
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program; if not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package backend_test
|
||||
package compiler_test
|
||||
|
||||
import (
|
||||
"github.com/mudler/luet/pkg/compiler"
|
||||
. "github.com/mudler/luet/pkg/compiler"
|
||||
. "github.com/mudler/luet/pkg/compiler/backend"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
@@ -24,7 +24,7 @@ import (
|
||||
)
|
||||
|
||||
var _ = Describe("Docker image diffs", func() {
|
||||
var b compiler.CompilerBackend
|
||||
var b CompilerBackend
|
||||
|
||||
BeforeEach(func() {
|
||||
b = NewSimpleDockerBackend()
|
||||
@@ -32,9 +32,8 @@ var _ = Describe("Docker image diffs", func() {
|
||||
|
||||
Context("Generate diffs from docker images", func() {
|
||||
It("Detect no changes", func() {
|
||||
opts := compiler.CompilerBackendOptions{
|
||||
ImageName: "alpine:latest",
|
||||
Destination: "alpine:latest",
|
||||
opts := Options{
|
||||
ImageName: "alpine:latest",
|
||||
}
|
||||
err := b.DownloadImage(opts)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
@@ -48,21 +47,19 @@ var _ = Describe("Docker image diffs", func() {
|
||||
})
|
||||
|
||||
It("Detects additions and changed files", func() {
|
||||
err := b.DownloadImage(compiler.CompilerBackendOptions{
|
||||
err := b.DownloadImage(Options{
|
||||
ImageName: "quay.io/mocaccino/micro",
|
||||
})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
err = b.DownloadImage(compiler.CompilerBackendOptions{
|
||||
err = b.DownloadImage(Options{
|
||||
ImageName: "quay.io/mocaccino/extra",
|
||||
})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
layers, err := GenerateChanges(b, compiler.CompilerBackendOptions{
|
||||
ImageName: "quay.io/mocaccino/micro",
|
||||
Destination: "quay.io/mocaccino/micro",
|
||||
}, compiler.CompilerBackendOptions{
|
||||
ImageName: "quay.io/mocaccino/extra",
|
||||
Destination: "quay.io/mocaccino/extra",
|
||||
layers, err := GenerateChanges(b, Options{
|
||||
ImageName: "quay.io/mocaccino/micro",
|
||||
}, Options{
|
||||
ImageName: "quay.io/mocaccino/extra",
|
||||
})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(len(layers)).To(Equal(1))
|
@@ -1,4 +1,4 @@
|
||||
// Copyright © 2019 Ettore Di Giacinto <mudler@gentoo.org>
|
||||
// Copyright © 2019-2021 Ettore Di Giacinto <mudler@sabayon.org>
|
||||
//
|
||||
// This program is free software; you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
@@ -19,6 +19,7 @@ import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
|
||||
"regexp"
|
||||
@@ -26,60 +27,65 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/imdario/mergo"
|
||||
bus "github.com/mudler/luet/pkg/bus"
|
||||
yaml "gopkg.in/yaml.v2"
|
||||
|
||||
"github.com/mudler/luet/pkg/compiler/backend"
|
||||
artifact "github.com/mudler/luet/pkg/compiler/types/artifact"
|
||||
"github.com/mudler/luet/pkg/compiler/types/options"
|
||||
compilerspec "github.com/mudler/luet/pkg/compiler/types/spec"
|
||||
"github.com/mudler/luet/pkg/helpers"
|
||||
. "github.com/mudler/luet/pkg/logger"
|
||||
pkg "github.com/mudler/luet/pkg/package"
|
||||
"github.com/mudler/luet/pkg/solver"
|
||||
"github.com/mudler/luet/pkg/tree"
|
||||
"github.com/pkg/errors"
|
||||
"gopkg.in/yaml.v2"
|
||||
)
|
||||
|
||||
const BuildFile = "build.yaml"
|
||||
const DefinitionFile = "definition.yaml"
|
||||
const CollectionFile = "collection.yaml"
|
||||
|
||||
type LuetCompiler struct {
|
||||
*tree.CompilerRecipe
|
||||
Backend CompilerBackend
|
||||
Database pkg.PackageDatabase
|
||||
ImageRepository string
|
||||
PullFirst, KeepImg, Clean bool
|
||||
Concurrency int
|
||||
CompressionType CompressionImplementation
|
||||
Options CompilerOptions
|
||||
SolverOptions solver.Options
|
||||
}
|
||||
type ArtifactIndex []*artifact.PackageArtifact
|
||||
|
||||
func NewLuetCompiler(backend CompilerBackend, db pkg.PackageDatabase, opt *CompilerOptions, solvopts solver.Options) Compiler {
|
||||
// The CompilerRecipe will gives us a tree with only build deps listed.
|
||||
return &LuetCompiler{
|
||||
Backend: backend,
|
||||
CompilerRecipe: &tree.CompilerRecipe{
|
||||
tree.Recipe{Database: db},
|
||||
},
|
||||
Database: db,
|
||||
ImageRepository: opt.ImageRepository,
|
||||
PullFirst: opt.PullFirst,
|
||||
CompressionType: opt.CompressionType,
|
||||
KeepImg: opt.KeepImg,
|
||||
Concurrency: opt.Concurrency,
|
||||
Options: *opt,
|
||||
SolverOptions: solvopts,
|
||||
func (i ArtifactIndex) CleanPath() ArtifactIndex {
|
||||
newIndex := ArtifactIndex{}
|
||||
for _, art := range i {
|
||||
copy := art.ShallowCopy()
|
||||
copy.Path = path.Base(art.Path)
|
||||
newIndex = append(newIndex, copy)
|
||||
}
|
||||
return newIndex
|
||||
}
|
||||
|
||||
func (cs *LuetCompiler) SetConcurrency(i int) {
|
||||
cs.Concurrency = i
|
||||
type LuetCompiler struct {
|
||||
//*tree.CompilerRecipe
|
||||
Backend CompilerBackend
|
||||
Database pkg.PackageDatabase
|
||||
Options options.Compiler
|
||||
}
|
||||
|
||||
func (cs *LuetCompiler) SetCompressionType(t CompressionImplementation) {
|
||||
cs.CompressionType = t
|
||||
func NewCompiler(p ...options.Option) *LuetCompiler {
|
||||
c := options.NewDefaultCompiler()
|
||||
c.Apply(p...)
|
||||
|
||||
return &LuetCompiler{Options: *c}
|
||||
}
|
||||
|
||||
func (cs *LuetCompiler) compilerWorker(i int, wg *sync.WaitGroup, cspecs chan CompilationSpec, a *[]Artifact, m *sync.Mutex, concurrency int, keepPermissions bool, errors chan error) {
|
||||
func NewLuetCompiler(backend CompilerBackend, db pkg.PackageDatabase, compilerOpts ...options.Option) *LuetCompiler {
|
||||
// The CompilerRecipe will gives us a tree with only build deps listed.
|
||||
|
||||
c := NewCompiler(compilerOpts...)
|
||||
// c.Options.BackendType
|
||||
c.Backend = backend
|
||||
c.Database = db
|
||||
// c.CompilerRecipe = &tree.CompilerRecipe{
|
||||
// Recipe: tree.Recipe{Database: db},
|
||||
// }
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
func (cs *LuetCompiler) compilerWorker(i int, wg *sync.WaitGroup, cspecs chan *compilerspec.LuetCompilationSpec, a *[]*artifact.PackageArtifact, m *sync.Mutex, concurrency int, keepPermissions bool, errors chan error) {
|
||||
defer wg.Done()
|
||||
|
||||
for s := range cspecs {
|
||||
@@ -94,17 +100,18 @@ func (cs *LuetCompiler) compilerWorker(i int, wg *sync.WaitGroup, cspecs chan Co
|
||||
}
|
||||
}
|
||||
|
||||
func (cs *LuetCompiler) CompileWithReverseDeps(keepPermissions bool, ps CompilationSpecs) ([]Artifact, []error) {
|
||||
// CompileWithReverseDeps compiles the supplied compilationspecs and their reverse dependencies
|
||||
func (cs *LuetCompiler) CompileWithReverseDeps(keepPermissions bool, ps *compilerspec.LuetCompilationspecs) ([]*artifact.PackageArtifact, []error) {
|
||||
artifacts, err := cs.CompileParallel(keepPermissions, ps)
|
||||
if len(err) != 0 {
|
||||
return artifacts, err
|
||||
}
|
||||
|
||||
Info(":ant: Resolving reverse dependencies")
|
||||
toCompile := NewLuetCompilationspecs()
|
||||
toCompile := compilerspec.NewLuetCompilationspecs()
|
||||
for _, a := range artifacts {
|
||||
|
||||
revdeps := a.GetCompileSpec().GetPackage().Revdeps(cs.Database)
|
||||
revdeps := a.CompileSpec.GetPackage().Revdeps(cs.Database)
|
||||
for _, r := range revdeps {
|
||||
spec, asserterr := cs.FromPackage(r)
|
||||
if err != nil {
|
||||
@@ -114,28 +121,6 @@ func (cs *LuetCompiler) CompileWithReverseDeps(keepPermissions bool, ps Compilat
|
||||
|
||||
toCompile.Add(spec)
|
||||
}
|
||||
// for _, assertion := range a.GetSourceAssertion() {
|
||||
// if assertion.Value && assertion.Package.Flagged() {
|
||||
// spec, asserterr := cs.FromPackage(assertion.Package)
|
||||
// if err != nil {
|
||||
// return nil, append(err, asserterr)
|
||||
// }
|
||||
// w, asserterr := cs.Tree().World()
|
||||
// if err != nil {
|
||||
// return nil, append(err, asserterr)
|
||||
// }
|
||||
// revdeps := spec.GetPackage().Revdeps(&w)
|
||||
// for _, r := range revdeps {
|
||||
// spec, asserterr := cs.FromPackage(r)
|
||||
// if asserterr != nil {
|
||||
// return nil, append(err, asserterr)
|
||||
// }
|
||||
// spec.SetOutputPath(ps.All()[0].GetOutputPath())
|
||||
|
||||
// toCompile.Add(spec)
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
}
|
||||
|
||||
uniques := toCompile.Unique().Remove(ps)
|
||||
@@ -147,17 +132,17 @@ func (cs *LuetCompiler) CompileWithReverseDeps(keepPermissions bool, ps Compilat
|
||||
return append(artifacts, artifacts2...), err
|
||||
}

func (cs *LuetCompiler) CompileParallel(keepPermissions bool, ps CompilationSpecs) ([]Artifact, []error) {
Spinner(22)
defer SpinnerStop()
all := make(chan CompilationSpec)
artifacts := []Artifact{}
// CompileParallel compiles the supplied compilationspecs in parallel
// to note, no specific heuristic is implemented, and the specs are run in parallel as they are.
func (cs *LuetCompiler) CompileParallel(keepPermissions bool, ps *compilerspec.LuetCompilationspecs) ([]*artifact.PackageArtifact, []error) {
all := make(chan *compilerspec.LuetCompilationSpec)
artifacts := []*artifact.PackageArtifact{}
mutex := &sync.Mutex{}
errors := make(chan error, ps.Len())
var wg = new(sync.WaitGroup)
for i := 0; i < cs.Concurrency; i++ {
for i := 0; i < cs.Options.Concurrency; i++ {
wg.Add(1)
go cs.compilerWorker(i, wg, all, &artifacts, mutex, cs.Concurrency, keepPermissions, errors)
go cs.compilerWorker(i, wg, all, &artifacts, mutex, cs.Options.Concurrency, keepPermissions, errors)
}

for _, p := range ps.All() {
@@ -236,7 +221,20 @@ func (cs *LuetCompiler) stripFromRootfs(includes []string, rootfs string, includ
return nil
}

func (cs *LuetCompiler) unpackFs(rootfs string, concurrency int, p CompilationSpec) (Artifact, error) {
func (cs *LuetCompiler) unpackFs(concurrency int, keepPermissions bool, p *compilerspec.LuetCompilationSpec, runnerOpts backend.Options) (*artifact.PackageArtifact, error) {

rootfs, err := ioutil.TempDir(p.GetOutputPath(), "rootfs")
if err != nil {
return nil, errors.Wrap(err, "Could not create tempdir")
}
defer os.RemoveAll(rootfs) // clean up

err = cs.Backend.ExtractRootfs(backend.Options{
ImageName: runnerOpts.ImageName, Destination: rootfs}, keepPermissions)
if err != nil {
return nil, errors.Wrap(err, "Could not extract rootfs")
}

if p.GetPackageDir() != "" {
Info(":tophat: Packing from output dir", p.GetPackageDir())
rootfs = filepath.Join(rootfs, p.GetPackageDir())
@@ -247,55 +245,61 @@ func (cs *LuetCompiler) unpackFs(rootfs string, concurrency int, p CompilationSp
cs.stripFromRootfs(p.GetIncludes(), rootfs, true)
}
if len(p.GetExcludes()) > 0 {
// strip from includes
// strip from excludes
cs.stripFromRootfs(p.GetExcludes(), rootfs, false)
}
artifact := NewPackageArtifact(p.Rel(p.GetPackage().GetFingerPrint() + ".package.tar"))
artifact.SetCompressionType(cs.CompressionType)
a := artifact.NewPackageArtifact(p.Rel(p.GetPackage().GetFingerPrint() + ".package.tar"))
a.CompressionType = cs.Options.CompressionType

if err := artifact.Compress(rootfs, concurrency); err != nil {
if err := a.Compress(rootfs, concurrency); err != nil {
return nil, errors.Wrap(err, "Error met while creating package archive")
}

artifact.SetCompileSpec(p)
return artifact, nil
a.CompileSpec = p
return a, nil
}

func (cs *LuetCompiler) unpackDelta(rootfs string, concurrency int, keepPermissions bool, p CompilationSpec, builderOpts, runnerOpts CompilerBackendOptions) (Artifact, error) {
func (cs *LuetCompiler) unpackDelta(concurrency int, keepPermissions bool, p *compilerspec.LuetCompilationSpec, builderOpts, runnerOpts backend.Options) (*artifact.PackageArtifact, error) {

rootfs, err := ioutil.TempDir(p.GetOutputPath(), "rootfs")
if err != nil {
return nil, errors.Wrap(err, "Could not create tempdir")
}
defer os.RemoveAll(rootfs) // clean up

pkgTag := ":package: " + p.GetPackage().HumanReadableString()
if cs.Options.PullFirst && !cs.Backend.ImageExists(builderOpts.ImageName) && cs.Backend.ImageAvailable(builderOpts.ImageName) {
err := cs.Backend.DownloadImage(builderOpts)
if err != nil {
return nil, errors.Wrap(err, "Could not pull image")
}
} else if !cs.Backend.ImageExists(builderOpts.ImageName) {
return nil, errors.New("No image found for " + builderOpts.ImageName)
}
if err := cs.Backend.ExportImage(builderOpts); err != nil {
return nil, errors.Wrap(err, "Could not export image"+builderOpts.ImageName)
}
if !cs.Options.KeepImageExport {
defer os.Remove(builderOpts.Destination)
}

Info(pkgTag, ":hammer: Generating delta")
diffs, err := cs.Backend.Changes(builderOpts, runnerOpts)
diffs, err := GenerateChanges(cs.Backend, builderOpts, runnerOpts)
if err != nil {
return nil, errors.Wrap(err, "Could not generate changes from layers")
}
artifact, err := ExtractArtifactFromDelta(rootfs, p.Rel(p.GetPackage().GetFingerPrint()+".package.tar"), diffs, concurrency, keepPermissions, p.GetIncludes(), p.GetExcludes(), cs.CompressionType)

Debug("Extracting image to grab files from delta")
if err := cs.Backend.ExtractRootfs(backend.Options{
ImageName: runnerOpts.ImageName, Destination: rootfs}, keepPermissions); err != nil {
return nil, errors.Wrap(err, "Could not extract rootfs")
}
artifact, err := artifact.ExtractArtifactFromDelta(rootfs, p.Rel(p.GetPackage().GetFingerPrint()+".package.tar"), diffs, concurrency, keepPermissions, p.GetIncludes(), p.GetExcludes(), cs.Options.CompressionType)
if err != nil {
return nil, errors.Wrap(err, "Could not generate deltas")
}

artifact.SetCompileSpec(p)
artifact.CompileSpec = p
return artifact, nil
}
|
||||
func (cs *LuetCompiler) buildPackageImage(image, buildertaggedImage, packageImage string,
|
||||
concurrency int, keepPermissions bool,
|
||||
p CompilationSpec) (CompilerBackendOptions, CompilerBackendOptions, error) {
|
||||
p *compilerspec.LuetCompilationSpec) (backend.Options, backend.Options, error) {
|
||||
|
||||
var runnerOpts, builderOpts CompilerBackendOptions
|
||||
var runnerOpts, builderOpts backend.Options
|
||||
|
||||
pkgTag := ":package: " + p.GetPackage().HumanReadableString()
|
||||
|
||||
@@ -303,21 +307,19 @@ func (cs *LuetCompiler) buildPackageImage(image, buildertaggedImage, packageImag
|
||||
// so the hash is unique also in cases where
|
||||
// some package deps does have completely different
|
||||
// depgraphs
|
||||
// TODO: As the salt contains the packageImage ( in registry/organization/imagename:tag format)
|
||||
// the images hashes are broken with registry mirrors.
|
||||
// We should use the image tag, or pass by the package assertion hash which is unique
|
||||
// TODO: We should use the image tag, or pass by the package assertion hash which is unique
|
||||
// and identifies the deptree of the package.
|
||||
|
||||
fp := p.GetPackage().HashFingerprint(packageImage)
|
||||
fp := p.GetPackage().HashFingerprint(helpers.StripRegistryFromImage(packageImage))
|
||||
|
||||
if buildertaggedImage == "" {
|
||||
buildertaggedImage = cs.ImageRepository + ":builder-" + fp
|
||||
buildertaggedImage = cs.Options.PushImageRepository + ":builder-" + fp
|
||||
Debug(pkgTag, "Creating intermediary image", buildertaggedImage, "from", image)
|
||||
}
|
||||
|
||||
// TODO: Cleanup, not actually hit
|
||||
if packageImage == "" {
|
||||
packageImage = cs.ImageRepository + ":builder-invalid" + fp
|
||||
return runnerOpts, builderOpts, errors.New("no package image given")
|
||||
}
|
||||
|
||||
p.SetSeedImage(image) // In this case, we ignore the build deps as we suppose that the image has them - otherwise we recompose the tree with a solver,
|
||||
@@ -361,23 +363,24 @@ func (cs *LuetCompiler) buildPackageImage(image, buildertaggedImage, packageImag
|
||||
return builderOpts, runnerOpts, errors.Wrap(err, "Could not generate image definition")
|
||||
}
|
||||
|
||||
builderOpts = CompilerBackendOptions{
|
||||
builderOpts = backend.Options{
|
||||
ImageName: buildertaggedImage,
|
||||
SourcePath: buildDir,
|
||||
DockerFileName: p.GetPackage().GetFingerPrint() + "-builder.dockerfile",
|
||||
Destination: p.Rel(p.GetPackage().GetFingerPrint() + "-builder.image.tar"),
|
||||
BackendArgs: cs.Options.BackendArgs,
|
||||
}
|
||||
runnerOpts = CompilerBackendOptions{
|
||||
runnerOpts = backend.Options{
|
||||
ImageName: packageImage,
|
||||
SourcePath: buildDir,
|
||||
DockerFileName: p.GetPackage().GetFingerPrint() + ".dockerfile",
|
||||
Destination: p.Rel(p.GetPackage().GetFingerPrint() + ".image.tar"),
|
||||
BackendArgs: cs.Options.BackendArgs,
|
||||
}
|
||||
|
||||
buildAndPush := func(opts CompilerBackendOptions) error {
|
||||
buildAndPush := func(opts backend.Options) error {
|
||||
buildImage := true
|
||||
if cs.Options.PullFirst {
|
||||
bus.Manager.Publish(bus.EventImagePrePull, opts)
|
||||
err := cs.Backend.DownloadImage(opts)
|
||||
if err == nil {
|
||||
buildImage = false
|
||||
@@ -385,20 +388,15 @@ func (cs *LuetCompiler) buildPackageImage(image, buildertaggedImage, packageImag
|
||||
Warning("Failed to download '" + opts.ImageName + "'. Will keep going and build the image unless you use --fatal")
|
||||
Warning(err.Error())
|
||||
}
|
||||
bus.Manager.Publish(bus.EventImagePostPull, opts)
|
||||
}
|
||||
if buildImage {
|
||||
bus.Manager.Publish(bus.EventImagePreBuild, opts)
|
||||
if err := cs.Backend.BuildImage(opts); err != nil {
|
||||
return errors.Wrap(err, "Could not build image: "+image+" "+opts.DockerFileName)
|
||||
return errors.Wrapf(err, "Could not build image: %s %s", image, opts.DockerFileName)
|
||||
}
|
||||
bus.Manager.Publish(bus.EventImagePostBuild, opts)
|
||||
if cs.Options.Push {
|
||||
bus.Manager.Publish(bus.EventImagePrePush, opts)
|
||||
if err = cs.Backend.Push(opts); err != nil {
|
||||
return errors.Wrap(err, "Could not push image: "+image+" "+opts.DockerFileName)
|
||||
return errors.Wrapf(err, "Could not push image: %s %s", image, opts.DockerFileName)
|
||||
}
|
||||
bus.Manager.Publish(bus.EventImagePostPush, opts)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
@@ -406,7 +404,7 @@ func (cs *LuetCompiler) buildPackageImage(image, buildertaggedImage, packageImag
|
||||
if len(p.GetPreBuildSteps()) != 0 {
|
||||
Info(pkgTag, ":whale: Generating 'builder' image from", image, "as", buildertaggedImage, "with prelude steps")
|
||||
if err := buildAndPush(builderOpts); err != nil {
|
||||
return builderOpts, runnerOpts, errors.Wrap(err, "Could not push image: "+image+" "+builderOpts.DockerFileName)
|
||||
return builderOpts, runnerOpts, errors.Wrapf(err, "Could not push image: %s %s", image, builderOpts.DockerFileName)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -414,16 +412,16 @@ func (cs *LuetCompiler) buildPackageImage(image, buildertaggedImage, packageImag
|
||||
// acting as a docker tag.
|
||||
Info(pkgTag, ":whale: Generating 'package' image from", buildertaggedImage, "as", packageImage, "with build steps")
|
||||
if err := buildAndPush(runnerOpts); err != nil {
|
||||
return builderOpts, runnerOpts, errors.Wrap(err, "Could not push image: "+image+" "+builderOpts.DockerFileName)
|
||||
return builderOpts, runnerOpts, errors.Wrapf(err, "Could not push image: %s %s", image, runnerOpts.DockerFileName)
|
||||
}
|
||||
|
||||
return builderOpts, runnerOpts, nil
|
||||
}
|
||||
|
||||
func (cs *LuetCompiler) genArtifact(p CompilationSpec, builderOpts, runnerOpts CompilerBackendOptions, concurrency int, keepPermissions bool) (Artifact, error) {
|
||||
func (cs *LuetCompiler) genArtifact(p *compilerspec.LuetCompilationSpec, builderOpts, runnerOpts backend.Options, concurrency int, keepPermissions bool) (*artifact.PackageArtifact, error) {
|
||||
|
||||
// generate Artifact
|
||||
var artifact Artifact
|
||||
// generate *artifact.PackageArtifact
|
||||
var a *artifact.PackageArtifact
|
||||
var rootfs string
|
||||
var err error
|
||||
pkgTag := ":package: " + p.GetPackage().HumanReadableString()
|
||||
@@ -438,114 +436,171 @@ func (cs *LuetCompiler) genArtifact(p CompilationSpec, builderOpts, runnerOpts C
|
||||
}
|
||||
defer os.RemoveAll(rootfs) // clean up
|
||||
|
||||
artifact := NewPackageArtifact(fakePackage)
|
||||
artifact.SetCompressionType(cs.CompressionType)
|
||||
a := artifact.NewPackageArtifact(fakePackage)
|
||||
a.CompressionType = cs.Options.CompressionType
|
||||
|
||||
if err := artifact.Compress(rootfs, concurrency); err != nil {
|
||||
if err := a.Compress(rootfs, concurrency); err != nil {
|
||||
return nil, errors.Wrap(err, "Error met while creating package archive")
|
||||
}
|
||||
|
||||
artifact.SetCompileSpec(p)
|
||||
artifact.GetCompileSpec().GetPackage().SetBuildTimestamp(time.Now().String())
|
||||
a.CompileSpec = p
|
||||
a.CompileSpec.GetPackage().SetBuildTimestamp(time.Now().String())
|
||||
|
||||
err = artifact.WriteYaml(p.GetOutputPath())
|
||||
err = a.WriteYaml(p.GetOutputPath())
|
||||
if err != nil {
|
||||
return artifact, errors.Wrap(err, "Failed while writing metadata file")
|
||||
return a, errors.Wrap(err, "Failed while writing metadata file")
|
||||
}
|
||||
Info(pkgTag, " :white_check_mark: done (empty virtual package)")
|
||||
return artifact, nil
|
||||
}
|
||||
|
||||
// prepare folder content of the image with the package compiled inside
|
||||
if err := cs.Backend.ExportImage(runnerOpts); err != nil {
|
||||
return nil, errors.Wrap(err, "Failed exporting image")
|
||||
}
|
||||
|
||||
if !cs.Options.KeepImageExport {
|
||||
defer os.Remove(runnerOpts.Destination)
|
||||
}
|
||||
|
||||
rootfs, err = ioutil.TempDir(p.GetOutputPath(), "rootfs")
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Could not create tempdir")
|
||||
}
|
||||
defer os.RemoveAll(rootfs) // clean up
|
||||
|
||||
// TODO: Compression and such
|
||||
err = cs.Backend.ExtractRootfs(CompilerBackendOptions{
|
||||
ImageName: runnerOpts.ImageName,
|
||||
SourcePath: runnerOpts.Destination, Destination: rootfs}, keepPermissions)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Could not extract rootfs")
|
||||
return a, nil
|
||||
}
|
||||
|
||||
if p.UnpackedPackage() {
|
||||
// Take content of container as a base for our package files
|
||||
artifact, err = cs.unpackFs(rootfs, concurrency, p)
|
||||
a, err = cs.unpackFs(concurrency, keepPermissions, p, runnerOpts)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Error met while extracting image")
|
||||
}
|
||||
} else {
|
||||
// Generate delta between the two images
|
||||
artifact, err = cs.unpackDelta(rootfs, concurrency, keepPermissions, p, builderOpts, runnerOpts)
|
||||
a, err = cs.unpackDelta(concurrency, keepPermissions, p, builderOpts, runnerOpts)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Error met while generating delta")
|
||||
}
|
||||
}
|
||||
|
||||
filelist, err := artifact.FileList()
|
||||
filelist, err := a.FileList()
|
||||
if err != nil {
|
||||
return artifact, errors.Wrap(err, "Failed getting package list")
|
||||
return a, errors.Wrap(err, "Failed getting package list")
|
||||
}
|
||||
|
||||
artifact.SetFiles(filelist)
|
||||
artifact.GetCompileSpec().GetPackage().SetBuildTimestamp(time.Now().String())
|
||||
a.Files = filelist
|
||||
a.CompileSpec.GetPackage().SetBuildTimestamp(time.Now().String())
|
||||
|
||||
err = artifact.WriteYaml(p.GetOutputPath())
|
||||
err = a.WriteYaml(p.GetOutputPath())
|
||||
if err != nil {
|
||||
return artifact, errors.Wrap(err, "Failed while writing metadata file")
|
||||
return a, errors.Wrap(err, "Failed while writing metadata file")
|
||||
}
|
||||
Info(pkgTag, " :white_check_mark: Done")
|
||||
|
||||
return artifact, nil
|
||||
return a, nil
|
||||
}

func (cs *LuetCompiler) waitForImage(image string) {
if cs.Options.PullFirst && cs.Options.Wait && !cs.Backend.ImageAvailable(image) {
Info(fmt.Sprintf("Waiting for image %s to be available... :zzz:", image))
Spinner(22)
defer SpinnerStop()
for !cs.Backend.ImageAvailable(image) {
Info(fmt.Sprintf("Image %s not available yet, sleeping", image))
time.Sleep(5 * time.Second)
func (cs *LuetCompiler) waitForImages(images []string) {
if cs.Options.PullFirst && cs.Options.Wait {
available, _ := oneOfImagesAvailable(images, cs.Backend)
if !available {
Info(fmt.Sprintf("Waiting for image %s to be available... :zzz:", images))
Spinner(22)
defer SpinnerStop()
for !available {
available, _ = oneOfImagesAvailable(images, cs.Backend)
Info(fmt.Sprintf("Image %s not available yet, sleeping", images))
time.Sleep(5 * time.Second)
}
}
}
}

func (cs *LuetCompiler) compileWithImage(image, buildertaggedImage, packageImage string,
func oneOfImagesExists(images []string, b CompilerBackend) (bool, string) {
for _, i := range images {
if exists := b.ImageExists(i); exists {
return true, i
}
}
return false, ""
}
func oneOfImagesAvailable(images []string, b CompilerBackend) (bool, string) {
for _, i := range images {
if exists := b.ImageAvailable(i); exists {
return true, i
}
}
return false, ""
}
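// Note (not part of this changeset): the two helpers above differ only in the backend predicate
// they query — ImageExists appears to be a local check, ImageAvailable a remote-repository check.
// A caller can combine them to prefer a local hit before touching the network; candidates below is
// an assumed []string of image tags.
//
//	if ok, img := oneOfImagesExists(candidates, cs.Backend); ok {
//		return img // already present locally
//	}
//	if ok, img := oneOfImagesAvailable(candidates, cs.Backend); ok {
//		return img // can be pulled from a remote repository
//	}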

func (cs *LuetCompiler) resolveExistingImageHash(imageHash string) string {
var resolvedImage string
toChecklist := append([]string{fmt.Sprintf("%s:%s", cs.Options.PushImageRepository, imageHash)},
genImageList(cs.Options.PullImageRepository, imageHash)...)
if exists, which := oneOfImagesExists(toChecklist, cs.Backend); exists {
resolvedImage = which
}
if cs.Options.PullFirst {
if exists, which := oneOfImagesAvailable(toChecklist, cs.Backend); exists {
resolvedImage = which
}
}

if resolvedImage == "" {
resolvedImage = fmt.Sprintf("%s:%s", cs.Options.PushImageRepository, imageHash)
}
return resolvedImage
}
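// Worked example (assumed values, not from this changeset): with PushImageRepository set to
// "quay.io/org/cache" and PullImageRepository set to []string{"quay.io/org/mirror"}, a hash
// "abc123" yields the checklist
//
//	quay.io/org/cache:abc123
//	quay.io/org/mirror:abc123
//
// A locally existing entry is picked first; when PullFirst is enabled, a remotely available entry
// overrides it; if nothing resolves, the push repository tag is returned as the fallback.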

func LoadArtifactFromYaml(spec *compilerspec.LuetCompilationSpec) (*artifact.PackageArtifact, error) {
metaFile := spec.GetPackage().GetMetadataFilePath()
dat, err := ioutil.ReadFile(spec.Rel(metaFile))
if err != nil {
return nil, errors.Wrap(err, "Error reading file "+metaFile)
}
art, err := artifact.NewPackageArtifactFromYaml(dat)
if err != nil {
return nil, errors.Wrap(err, "Error writing file "+metaFile)
}
// It is relative, set it back to abs
art.Path = spec.Rel(art.Path)
return art, nil
}

func (cs *LuetCompiler) getImageArtifact(hash string, p *compilerspec.LuetCompilationSpec) (*artifact.PackageArtifact, error) {
// we check if there is an available image with the given hash and
// we return a full artifact if can be loaded locally.

toChecklist := append([]string{fmt.Sprintf("%s:%s", cs.Options.PushImageRepository, hash)},
genImageList(cs.Options.PullImageRepository, hash)...)

exists, _ := oneOfImagesExists(toChecklist, cs.Backend)
if art, err := LoadArtifactFromYaml(p); err == nil && exists { // If YAML is correctly loaded, and both images exists, no reason to rebuild.
Debug("Package reloaded from YAML. Skipping build")
return art, nil
}
cs.waitForImages(toChecklist)
available, _ := oneOfImagesAvailable(toChecklist, cs.Backend)
if exists || (cs.Options.PullFirst && available) {
Debug("Image available, returning empty artifact")
return &artifact.PackageArtifact{}, nil
}

return nil, errors.New("artifact not found")
}

// compileWithImage compiles a packageTagHash image using the image source, and tags an intermediate
// image buildertaggedImage.
// Images that can be resolved from repositories are preferred over the local ones if PullFirst is set to true,
// avoiding rebuilding images as much as possible.
func (cs *LuetCompiler) compileWithImage(image, buildertaggedImage string, packageTagHash string,
|
||||
concurrency int,
|
||||
keepPermissions, keepImg bool,
|
||||
p CompilationSpec, generateArtifact bool) (Artifact, error) {
|
||||
p *compilerspec.LuetCompilationSpec, generateArtifact bool) (*artifact.PackageArtifact, error) {
|
||||
|
||||
// If it is a virtual, check if we have to generate an empty artifact or not.
|
||||
if generateArtifact && p.EmptyPackage() && !p.HasImageSource() {
|
||||
return cs.genArtifact(p, CompilerBackendOptions{}, CompilerBackendOptions{}, concurrency, keepPermissions)
|
||||
} else if p.EmptyPackage() && !p.HasImageSource() {
|
||||
return &PackageArtifact{}, nil
|
||||
if generateArtifact && p.IsVirtual() {
|
||||
return cs.genArtifact(p, backend.Options{}, backend.Options{}, concurrency, keepPermissions)
|
||||
} else if p.IsVirtual() {
|
||||
return &artifact.PackageArtifact{}, nil
|
||||
}
|
||||
|
||||
if !generateArtifact {
|
||||
exists := cs.Backend.ImageExists(packageImage)
|
||||
if art, err := LoadArtifactFromYaml(p); err == nil && exists { // If YAML is correctly loaded, and both images exists, no reason to rebuild.
|
||||
Debug("Artifact reloaded from YAML. Skipping build")
|
||||
return art, err
|
||||
}
|
||||
cs.waitForImage(packageImage)
|
||||
if cs.Options.PullFirst && cs.Backend.ImageAvailable(packageImage) {
|
||||
return &PackageArtifact{}, nil
|
||||
// try to avoid regenerating the image if possible by checking the hash in the
|
||||
// given repositories
|
||||
// It is best effort. If we fail resolving, we will generate the images and keep going
|
||||
if art, err := cs.getImageArtifact(packageTagHash, p); err == nil {
|
||||
return art, nil
|
||||
}
|
||||
}
|
||||
|
||||
// always going to point at the destination from the repo defined
|
||||
packageImage := fmt.Sprintf("%s:%s", cs.Options.PushImageRepository, packageTagHash)
|
||||
builderOpts, runnerOpts, err := cs.buildPackageImage(image, buildertaggedImage, packageImage, concurrency, keepPermissions, p)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed building package image")
|
||||
@@ -564,14 +619,16 @@ func (cs *LuetCompiler) compileWithImage(image, buildertaggedImage, packageImage
|
||||
}
|
||||
|
||||
if !generateArtifact {
|
||||
return &PackageArtifact{}, nil
|
||||
return &artifact.PackageArtifact{}, nil
|
||||
}
|
||||
|
||||
return cs.genArtifact(p, builderOpts, runnerOpts, concurrency, keepPermissions)
|
||||
}
|
||||
|
||||
func (cs *LuetCompiler) FromDatabase(db pkg.PackageDatabase, minimum bool, dst string) ([]CompilationSpec, error) {
|
||||
compilerSpecs := NewLuetCompilationspecs()
|
||||
// FromDatabase returns all the available compilation specs from a database. If the minimum flag is returned
|
||||
// it will be computed a minimal subset that will guarantees that all packages are built ( if not targeting a single package explictly )
|
||||
func (cs *LuetCompiler) FromDatabase(db pkg.PackageDatabase, minimum bool, dst string) ([]*compilerspec.LuetCompilationSpec, error) {
|
||||
compilerSpecs := compilerspec.NewLuetCompilationspecs()
|
||||
|
||||
w := db.World()
|
||||
|
||||
@@ -595,11 +652,11 @@ func (cs *LuetCompiler) FromDatabase(db pkg.PackageDatabase, minimum bool, dst s
|
||||
}
|
||||
|
||||
// ComputeMinimumCompilableSet strips specs that are eventually compiled by leafs
|
||||
func (cs *LuetCompiler) ComputeMinimumCompilableSet(p ...CompilationSpec) ([]CompilationSpec, error) {
|
||||
func (cs *LuetCompiler) ComputeMinimumCompilableSet(p ...*compilerspec.LuetCompilationSpec) ([]*compilerspec.LuetCompilationSpec, error) {
|
||||
// Generate a set with all the deps of the provided specs
|
||||
// we will use that set to remove the deps from the list of provided compilation specs
|
||||
allDependencies := solver.PackagesAssertions{} // Get all packages that will be in deps
|
||||
result := []CompilationSpec{}
|
||||
result := []*compilerspec.LuetCompilationSpec{}
|
||||
for _, spec := range p {
|
||||
ass, err := cs.ComputeDepTree(spec)
|
||||
if err != nil {
|
||||
@@ -617,9 +674,11 @@ func (cs *LuetCompiler) ComputeMinimumCompilableSet(p ...CompilationSpec) ([]Com
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (cs *LuetCompiler) ComputeDepTree(p CompilationSpec) (solver.PackagesAssertions, error) {
|
||||
// ComputeDepTree computes the dependency tree of a compilation spec and returns solver assertions
|
||||
// in order to be able to compile the spec.
|
||||
func (cs *LuetCompiler) ComputeDepTree(p *compilerspec.LuetCompilationSpec) (solver.PackagesAssertions, error) {
|
||||
|
||||
s := solver.NewResolver(cs.SolverOptions, pkg.NewInMemoryDatabase(false), cs.Database, pkg.NewInMemoryDatabase(false), cs.Options.SolverOptions.Resolver())
|
||||
s := solver.NewResolver(cs.Options.SolverOptions.Options, pkg.NewInMemoryDatabase(false), cs.Database, pkg.NewInMemoryDatabase(false), cs.Options.SolverOptions.Resolver())
|
||||
|
||||
solution, err := s.Install(pkg.Packages{p.GetPackage()})
|
||||
if err != nil {
|
||||
@@ -646,17 +705,32 @@ func (cs *LuetCompiler) ComputeDepTree(p CompilationSpec) (solver.PackagesAssert
|
||||
return assertions, nil
|
||||
}

// Compile is non-parallel
func (cs *LuetCompiler) Compile(keepPermissions bool, p CompilationSpec) (Artifact, error) {
// Compile is a non-parallel version of CompileParallel. It builds the compilation specs and generates
// an artifact
func (cs *LuetCompiler) Compile(keepPermissions bool, p *compilerspec.LuetCompilationSpec) (*artifact.PackageArtifact, error) {
asserts, err := cs.ComputeDepTree(p)
if err != nil {
panic(err)
}
p.SetSourceAssertion(asserts)
return cs.compile(cs.Concurrency, keepPermissions, p)
return cs.compile(cs.Options.Concurrency, keepPermissions, p)
}
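// Usage sketch (illustrative, not part of this changeset): compile a single package end to end.
// The package coordinates and output path are assumptions; FromPackage and SetOutputPath are the
// calls used by the tests further below.
//
//	spec, err := cs.FromPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
//	if err != nil {
//		return err
//	}
//	spec.SetOutputPath("/tmp/out")
//	a, err := cs.Compile(true, spec)
//	if err != nil {
//		return err
//	}
//	fmt.Println("package tarball at", a.Path)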

func (cs *LuetCompiler) compile(concurrency int, keepPermissions bool, p CompilationSpec) (Artifact, error) {
func genImageList(refs []string, hash string) []string {
var res []string
for _, r := range refs {
res = append(res, fmt.Sprintf("%s:%s", r, hash))
}
return res
}
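// Example (assumed values): genImageList expands the configured pull repositories into fully
// qualified candidate tags for a single hash:
//
//	genImageList([]string{"quay.io/org/a", "quay.io/org/b"}, "deadbeef")
//	// -> []string{"quay.io/org/a:deadbeef", "quay.io/org/b:deadbeef"}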
|
||||
|
||||
func (cs *LuetCompiler) compile(concurrency int, keepPermissions bool, p *compilerspec.LuetCompilationSpec) (*artifact.PackageArtifact, error) {
|
||||
if len(p.BuildOptions.PullImageRepository) != 0 {
|
||||
orig := cs.Options.PullImageRepository
|
||||
cs.Options.PullImageRepository = append(orig, p.BuildOptions.PullImageRepository...)
|
||||
defer func() { cs.Options.PullImageRepository = orig }()
|
||||
}
|
||||
|
||||
Info(":package: Compiling", p.GetPackage().HumanReadableString(), ".... :coffee:")
|
||||
|
||||
Debug(fmt.Sprintf("%s: has images %t, empty package: %t", p.GetPackage().HumanReadableString(), p.HasImageSource(), p.EmptyPackage()))
|
||||
@@ -669,20 +743,22 @@ func (cs *LuetCompiler) compile(concurrency int, keepPermissions bool, p Compila
|
||||
}
|
||||
|
||||
targetAssertion := p.GetSourceAssertion().Search(p.GetPackage().GetFingerPrint())
|
||||
targetPackageHash := cs.ImageRepository + ":" + targetAssertion.Hash.PackageHash
|
||||
|
||||
bus.Manager.Publish(bus.EventPackagePreBuild, struct {
|
||||
CompileSpec CompilationSpec
|
||||
CompileSpec *compilerspec.LuetCompilationSpec
|
||||
Assert solver.PackageAssert
|
||||
}{
|
||||
CompileSpec: p,
|
||||
Assert: *targetAssertion,
|
||||
})
|
||||
|
||||
// Update compilespec build options - it will be then serialized into the compilation metadata file
|
||||
p.SetBuildOptions(cs.Options)
|
||||
|
||||
// - If image is set we just generate a plain dockerfile
|
||||
// Treat last case (easier) first. The image is provided and we just compute a plain dockerfile with the images listed as above
|
||||
if p.GetImage() != "" {
|
||||
return cs.compileWithImage(p.GetImage(), "", targetPackageHash, concurrency, keepPermissions, cs.KeepImg, p, true)
|
||||
return cs.compileWithImage(p.GetImage(), "", targetAssertion.Hash.PackageHash, concurrency, keepPermissions, cs.Options.KeepImg, p, true)
|
||||
}
|
||||
|
||||
// - If image is not set, we read a base_image. Then we will build one image from it to kick-off our build based
|
||||
@@ -691,7 +767,7 @@ func (cs *LuetCompiler) compile(concurrency int, keepPermissions bool, p Compila
|
||||
// - We later on compute an hash used to identify the image, so each similar deptree keeps the same build image.
|
||||
|
||||
dependencies := p.GetSourceAssertion().Drop(p.GetPackage()) // at this point we should have a flattened list of deps to build, including all of them (with all constraints propagated already)
|
||||
departifacts := []Artifact{} // TODO: Return this somehow
|
||||
departifacts := []*artifact.PackageArtifact{} // TODO: Return this somehow
|
||||
var lastHash string
|
||||
depsN := 0
|
||||
currentN := 0
|
||||
@@ -713,75 +789,76 @@ func (cs *LuetCompiler) compile(concurrency int, keepPermissions bool, p Compila
|
||||
return nil, errors.Wrap(err, "Error while generating compilespec for "+assertion.Package.GetName())
|
||||
}
|
||||
compileSpec.SetOutputPath(p.GetOutputPath())
|
||||
|
||||
buildImageHash := cs.ImageRepository + ":" + assertion.Hash.BuildHash
|
||||
currentPackageImageHash := cs.ImageRepository + ":" + assertion.Hash.PackageHash
|
||||
Debug(pkgTag, " :arrow_right_hook: :whale: Builder image from", buildImageHash)
|
||||
Debug(pkgTag, " :arrow_right_hook: :whale: Package image name", currentPackageImageHash)
|
||||
Debug(pkgTag, " :arrow_right_hook: :whale: Builder image from hash", assertion.Hash.BuildHash)
|
||||
Debug(pkgTag, " :arrow_right_hook: :whale: Package image from hash", assertion.Hash.PackageHash)
|
||||
|
||||
bus.Manager.Publish(bus.EventPackagePreBuild, struct {
|
||||
CompileSpec CompilationSpec
|
||||
CompileSpec *compilerspec.LuetCompilationSpec
|
||||
Assert solver.PackageAssert
|
||||
}{
|
||||
CompileSpec: compileSpec,
|
||||
Assert: assertion,
|
||||
})
|
||||
|
||||
lastHash = currentPackageImageHash
|
||||
lastHash = assertion.Hash.PackageHash
|
||||
// for the source instead, pick an image and a buildertaggedImage from hashes if they exists.
|
||||
// otherways fallback to the pushed repo
|
||||
// Resolve images from the hashtree
|
||||
resolvedBuildImage := cs.resolveExistingImageHash(assertion.Hash.BuildHash)
|
||||
if compileSpec.GetImage() != "" {
|
||||
Debug(pkgTag, " :wrench: Compiling "+compileSpec.GetPackage().HumanReadableString()+" from image")
|
||||
artifact, err := cs.compileWithImage(compileSpec.GetImage(), buildImageHash, currentPackageImageHash, concurrency, keepPermissions, cs.KeepImg, compileSpec, packageDeps)
|
||||
|
||||
a, err := cs.compileWithImage(compileSpec.GetImage(), resolvedBuildImage, assertion.Hash.PackageHash, concurrency, keepPermissions, cs.Options.KeepImg, compileSpec, packageDeps)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Failed compiling "+compileSpec.GetPackage().HumanReadableString())
|
||||
}
|
||||
departifacts = append(departifacts, artifact)
|
||||
departifacts = append(departifacts, a)
|
||||
Info(pkgTag, ":white_check_mark: Done")
|
||||
continue
|
||||
}
|
||||
|
||||
Debug(pkgTag, " :wrench: Compiling "+compileSpec.GetPackage().HumanReadableString()+" from tree")
|
||||
artifact, err := cs.compileWithImage(buildImageHash, "", currentPackageImageHash, concurrency, keepPermissions, cs.KeepImg, compileSpec, packageDeps)
|
||||
a, err := cs.compileWithImage(resolvedBuildImage, "", assertion.Hash.PackageHash, concurrency, keepPermissions, cs.Options.KeepImg, compileSpec, packageDeps)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Failed compiling "+compileSpec.GetPackage().HumanReadableString())
|
||||
// deperrs = append(deperrs, err)
|
||||
// break // stop at first error
|
||||
}
|
||||
|
||||
bus.Manager.Publish(bus.EventPackagePostBuild, struct {
|
||||
CompileSpec CompilationSpec
|
||||
Artifact Artifact
|
||||
CompileSpec *compilerspec.LuetCompilationSpec
|
||||
Artifact *artifact.PackageArtifact
|
||||
}{
|
||||
CompileSpec: compileSpec,
|
||||
Artifact: artifact,
|
||||
Artifact: a,
|
||||
})
|
||||
|
||||
departifacts = append(departifacts, artifact)
|
||||
departifacts = append(departifacts, a)
|
||||
Info(pkgTag, ":white_check_mark: Done")
|
||||
}
|
||||
|
||||
} else if len(dependencies) > 0 {
|
||||
lastHash = cs.ImageRepository + ":" + dependencies[len(dependencies)-1].Hash.PackageHash
|
||||
lastHash = dependencies[len(dependencies)-1].Hash.PackageHash
|
||||
}
|
||||
|
||||
if !cs.Options.OnlyDeps {
|
||||
resolvedBuildImage := cs.resolveExistingImageHash(lastHash)
|
||||
Info(":rocket: All dependencies are satisfied, building package requested by the user", p.GetPackage().HumanReadableString())
|
||||
Info(":package:", p.GetPackage().HumanReadableString(), " Using image: ", lastHash)
|
||||
artifact, err := cs.compileWithImage(lastHash, "", targetPackageHash, concurrency, keepPermissions, cs.KeepImg, p, true)
|
||||
Info(":package:", p.GetPackage().HumanReadableString(), " Using image: ", resolvedBuildImage)
|
||||
a, err := cs.compileWithImage(resolvedBuildImage, "", targetAssertion.Hash.PackageHash, concurrency, keepPermissions, cs.Options.KeepImg, p, true)
|
||||
if err != nil {
|
||||
return artifact, err
|
||||
return a, err
|
||||
}
|
||||
artifact.SetDependencies(departifacts)
|
||||
artifact.SetSourceAssertion(p.GetSourceAssertion())
|
||||
a.Dependencies = departifacts
|
||||
a.SourceAssertion = p.GetSourceAssertion()
|
||||
|
||||
bus.Manager.Publish(bus.EventPackagePostBuild, struct {
|
||||
CompileSpec CompilationSpec
|
||||
Artifact Artifact
|
||||
CompileSpec *compilerspec.LuetCompilationSpec
|
||||
Artifact *artifact.PackageArtifact
|
||||
}{
|
||||
CompileSpec: p,
|
||||
Artifact: artifact,
|
||||
Artifact: a,
|
||||
})
|
||||
|
||||
return artifact, err
|
||||
return a, err
|
||||
} else {
|
||||
return departifacts[len(departifacts)-1], nil
|
||||
}
|
||||
@@ -789,16 +866,18 @@ func (cs *LuetCompiler) compile(concurrency int, keepPermissions bool, p Compila
|
||||
|
||||
type templatedata map[string]interface{}
|
||||
|
||||
func (cs *LuetCompiler) FromPackage(p pkg.Package) (CompilationSpec, error) {
|
||||
|
||||
pack, err := cs.Database.FindPackageCandidate(p)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
func (cs *LuetCompiler) templatePackage(vals []map[string]interface{}, pack pkg.Package) ([]byte, error) {
|
||||
|
||||
var dataresult []byte
|
||||
|
||||
val := pack.Rel(DefinitionFile)
|
||||
|
||||
// Update processed build values
|
||||
dst, err := helpers.UnMarshalValues(cs.Options.BuildValuesFile)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "unmarshalling values")
|
||||
}
|
||||
cs.Options.BuildValues = append(vals, (map[string]interface{})(dst))
|
||||
|
||||
if _, err := os.Stat(pack.Rel(CollectionFile)); err == nil {
|
||||
val = pack.Rel(CollectionFile)
|
||||
|
||||
@@ -811,42 +890,118 @@ func (cs *LuetCompiler) FromPackage(p pkg.Package) (CompilationSpec, error) {
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "rendering file "+val)
|
||||
}
|
||||
|
||||
packsRaw, err := pkg.GetRawPackages(data)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "getting raw packages")
|
||||
}
|
||||
|
||||
raw := packsRaw.Find(pack.GetName(), pack.GetCategory(), pack.GetVersion())
|
||||
|
||||
d := map[string]interface{}{}
|
||||
if len(cs.Options.BuildValuesFile) > 0 {
|
||||
defBuild, err := ioutil.ReadFile(cs.Options.BuildValuesFile)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "rendering file "+val)
|
||||
}
|
||||
err = yaml.Unmarshal(defBuild, &d)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "rendering file "+val)
|
||||
td := templatedata{}
|
||||
if len(vals) > 0 {
|
||||
for _, bv := range vals {
|
||||
current := templatedata(bv)
|
||||
if err := mergo.Merge(&td, current); err != nil {
|
||||
return nil, errors.Wrap(err, "merging values maps")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
dat, err := helpers.RenderHelm(string(dataBuild), raw, d)
|
||||
if err := mergo.Merge(&td, templatedata(raw)); err != nil {
|
||||
return nil, errors.Wrap(err, "merging values maps")
|
||||
}
|
||||
|
||||
dat, err := helpers.RenderHelm(string(dataBuild), td, dst)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "rendering file "+pack.Rel(BuildFile))
|
||||
}
|
||||
dataresult = []byte(dat)
|
||||
} else {
|
||||
out, err := helpers.RenderFiles(pack.Rel(BuildFile), val, cs.Options.BuildValuesFile)
|
||||
bv := cs.Options.BuildValuesFile
|
||||
if len(vals) > 0 {
|
||||
valuesdir, err := ioutil.TempDir("", "genvalues")
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Could not create tempdir")
|
||||
}
|
||||
defer os.RemoveAll(valuesdir) // clean up
|
||||
for _, b := range vals {
|
||||
out, err := yaml.Marshal(b)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "while marshalling values file")
|
||||
}
|
||||
f := filepath.Join(valuesdir, helpers.RandStringRunes(20))
|
||||
if err := ioutil.WriteFile(f, out, os.ModePerm); err != nil {
|
||||
return nil, errors.Wrap(err, "while writing temporary values file")
|
||||
}
|
||||
bv = append([]string{f}, bv...)
|
||||
}
|
||||
}
|
||||
out, err := helpers.RenderFiles(pack.Rel(BuildFile), val, bv...)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "rendering file "+pack.Rel(BuildFile))
|
||||
}
|
||||
dataresult = []byte(out)
|
||||
}
|
||||
|
||||
return NewLuetCompilationSpec(dataresult, pack)
|
||||
return dataresult, nil
|
||||
}
|
||||
|
||||
// FromPackage returns a compilation spec from a package definition
|
||||
func (cs *LuetCompiler) FromPackage(p pkg.Package) (*compilerspec.LuetCompilationSpec, error) {
|
||||
|
||||
pack, err := cs.Database.FindPackageCandidate(p)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
opts := options.Compiler{}
|
||||
|
||||
artifactMetadataFile := filepath.Join(p.GetTreeDir(), "..", p.GetMetadataFilePath())
|
||||
Debug("Checking if metadata file is present", artifactMetadataFile)
|
||||
if _, err := os.Stat(artifactMetadataFile); err == nil {
|
||||
f, err := os.Open(artifactMetadataFile)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not open %s", artifactMetadataFile)
|
||||
}
|
||||
dat, err := ioutil.ReadAll(f)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
art, err := artifact.NewPackageArtifactFromYaml(dat)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not decode package from yaml")
|
||||
}
|
||||
|
||||
Debug("Read build options:", art.CompileSpec.BuildOptions)
|
||||
opts = art.CompileSpec.BuildOptions
|
||||
opts.PushImageRepository = ""
|
||||
|
||||
} else if !os.IsNotExist(err) {
|
||||
Debug("error reading already existing artifact metadata file: ", err.Error())
|
||||
} else if os.IsNotExist(err) {
|
||||
Debug("metadata file not present, skipping", artifactMetadataFile)
|
||||
}
|
||||
|
||||
bytes, err := cs.templatePackage(opts.BuildValues, pack)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "while rendering package template")
|
||||
}
|
||||
|
||||
newSpec, err := compilerspec.NewLuetCompilationSpec(bytes, pack)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
newSpec.BuildOptions = opts
|
||||
|
||||
return newSpec, err
|
||||
}

// GetBackend returns the current compilation backend
func (cs *LuetCompiler) GetBackend() CompilerBackend {
return cs.Backend
}

// SetBackend sets the compilation backend
func (cs *LuetCompiler) SetBackend(b CompilerBackend) {
cs.Backend = b
}
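// Usage sketch (illustrative, not part of this changeset): the backend can be inspected or swapped
// on an existing compiler; NewSimpleDockerBackend is the constructor the tests below use via the
// backend package.
//
//	current := cs.GetBackend()
//	_ = current
//	cs.SetBackend(backend.NewSimpleDockerBackend())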
|
@@ -21,9 +21,11 @@ import (
|
||||
|
||||
. "github.com/mudler/luet/pkg/compiler"
|
||||
sd "github.com/mudler/luet/pkg/compiler/backend"
|
||||
"github.com/mudler/luet/pkg/compiler/types/compression"
|
||||
"github.com/mudler/luet/pkg/compiler/types/options"
|
||||
compilerspec "github.com/mudler/luet/pkg/compiler/types/spec"
|
||||
helpers "github.com/mudler/luet/pkg/helpers"
|
||||
pkg "github.com/mudler/luet/pkg/package"
|
||||
"github.com/mudler/luet/pkg/solver"
|
||||
"github.com/mudler/luet/pkg/tree"
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
@@ -39,7 +41,7 @@ var _ = Describe("Compiler", func() {
|
||||
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(3))
|
||||
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), NewDefaultCompilerOptions(), solver.Options{Type: solver.SingleCoreSimple})
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), options.Concurrency(2))
|
||||
|
||||
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
@@ -54,12 +56,11 @@ var _ = Describe("Compiler", func() {
|
||||
Expect(spec.GetPreBuildSteps()).To(Equal([]string{"echo foo > /test", "echo bar > /test2"}))
|
||||
|
||||
spec.SetOutputPath(tmpdir)
|
||||
compiler.SetConcurrency(2)
|
||||
|
||||
artifact, err := compiler.Compile(false, spec)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(artifact.GetPath())).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.GetPath(), tmpdir, false)).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(artifact.Path)).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.Path, tmpdir, false)).ToNot(HaveOccurred())
|
||||
|
||||
Expect(helpers.Exists(spec.Rel("test5"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("test6"))).To(BeTrue())
|
||||
@@ -83,7 +84,7 @@ var _ = Describe("Compiler", func() {
|
||||
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(3))
|
||||
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), NewDefaultCompilerOptions(), solver.Options{Type: solver.SingleCoreSimple})
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), options.Concurrency(1))
|
||||
|
||||
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
@@ -98,12 +99,11 @@ var _ = Describe("Compiler", func() {
|
||||
|
||||
spec.SetOutputPath(tmpdir)
|
||||
spec2.SetOutputPath(tmpdir)
|
||||
compiler.SetConcurrency(2)
|
||||
artifacts, errs := compiler.CompileParallel(false, NewLuetCompilationspecs(spec, spec2))
|
||||
artifacts, errs := compiler.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec, spec2))
|
||||
Expect(errs).To(BeNil())
|
||||
for _, artifact := range artifacts {
|
||||
Expect(helpers.Exists(artifact.GetPath())).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.GetPath(), tmpdir, false)).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(artifact.Path)).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.Path, tmpdir, false)).ToNot(HaveOccurred())
|
||||
}
|
||||
|
||||
})
|
||||
@@ -118,7 +118,7 @@ var _ = Describe("Compiler", func() {
|
||||
|
||||
err = generalRecipe.Load("../../tests/fixtures/templates")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), NewDefaultCompilerOptions(), solver.Options{Type: solver.SingleCoreSimple})
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase())
|
||||
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(1))
|
||||
pkg, err := generalRecipe.GetDatabase().FindPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
|
||||
@@ -142,7 +142,7 @@ var _ = Describe("Compiler", func() {
|
||||
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(4))
|
||||
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), NewDefaultCompilerOptions(), solver.Options{Type: solver.SingleCoreSimple})
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), options.Concurrency(2))
|
||||
|
||||
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "c", Category: "test", Version: "1.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
@@ -159,15 +159,14 @@ var _ = Describe("Compiler", func() {
|
||||
spec.SetOutputPath(tmpdir)
|
||||
spec2.SetOutputPath(tmpdir)
|
||||
spec3.SetOutputPath(tmpdir)
|
||||
compiler.SetConcurrency(2)
|
||||
|
||||
artifacts, errs := compiler.CompileParallel(false, NewLuetCompilationspecs(spec, spec2, spec3))
|
||||
artifacts, errs := compiler.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec, spec2, spec3))
|
||||
Expect(errs).To(BeNil())
|
||||
Expect(len(artifacts)).To(Equal(3))
|
||||
|
||||
for _, artifact := range artifacts {
|
||||
Expect(helpers.Exists(artifact.GetPath())).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.GetPath(), tmpdir, false)).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(artifact.Path)).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.Path, tmpdir, false)).ToNot(HaveOccurred())
|
||||
}
|
||||
|
||||
Expect(helpers.Exists(spec.Rel("test3"))).To(BeTrue())
|
||||
@@ -199,7 +198,7 @@ var _ = Describe("Compiler", func() {
|
||||
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(2))
|
||||
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), NewDefaultCompilerOptions(), solver.Options{Type: solver.SingleCoreSimple})
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), options.Concurrency(1))
|
||||
|
||||
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "extra", Category: "layer", Version: "1.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
@@ -207,23 +206,22 @@ var _ = Describe("Compiler", func() {
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
spec.SetOutputPath(tmpdir)
|
||||
spec2.SetOutputPath(tmpdir)
|
||||
compiler.SetConcurrency(1)
|
||||
artifacts, errs := compiler.CompileParallel(false, NewLuetCompilationspecs(spec))
|
||||
artifacts, errs := compiler.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec))
|
||||
Expect(errs).To(BeNil())
|
||||
Expect(len(artifacts)).To(Equal(1))
|
||||
|
||||
artifacts2, errs := compiler.CompileParallel(false, NewLuetCompilationspecs(spec2))
|
||||
artifacts2, errs := compiler.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec2))
|
||||
Expect(errs).To(BeNil())
|
||||
Expect(len(artifacts2)).To(Equal(1))
|
||||
|
||||
for _, artifact := range artifacts {
|
||||
Expect(helpers.Exists(artifact.GetPath())).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.GetPath(), tmpdir, false)).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(artifact.Path)).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.Path, tmpdir, false)).ToNot(HaveOccurred())
|
||||
}
|
||||
|
||||
for _, artifact := range artifacts2 {
|
||||
Expect(helpers.Exists(artifact.GetPath())).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.GetPath(), tmpdir, false)).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(artifact.Path)).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.Path, tmpdir, false)).ToNot(HaveOccurred())
|
||||
}
|
||||
|
||||
Expect(helpers.Exists(spec.Rel("etc/hosts"))).To(BeTrue())
|
||||
@@ -241,7 +239,7 @@ var _ = Describe("Compiler", func() {
|
||||
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(1))
|
||||
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), NewDefaultCompilerOptions(), solver.Options{Type: solver.SingleCoreSimple})
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase())
|
||||
|
||||
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
@@ -250,15 +248,14 @@ var _ = Describe("Compiler", func() {
|
||||
// Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
spec.SetOutputPath(tmpdir)
|
||||
compiler.SetConcurrency(1)
|
||||
|
||||
artifacts, errs := compiler.CompileParallel(false, NewLuetCompilationspecs(spec))
|
||||
artifacts, errs := compiler.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec))
|
||||
Expect(errs).To(BeNil())
|
||||
Expect(len(artifacts)).To(Equal(1))
|
||||
|
||||
for _, artifact := range artifacts {
|
||||
Expect(helpers.Exists(artifact.GetPath())).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.GetPath(), tmpdir, false)).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(artifact.Path)).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.Path, tmpdir, false)).ToNot(HaveOccurred())
|
||||
}
|
||||
Expect(helpers.Exists(spec.Rel("test5"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("marvin"))).To(BeTrue())
|
||||
@@ -276,7 +273,7 @@ var _ = Describe("Compiler", func() {
|
||||
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(1))
|
||||
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), NewDefaultCompilerOptions(), solver.Options{Type: solver.SingleCoreSimple})
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase())
|
||||
|
||||
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
@@ -285,15 +282,14 @@ var _ = Describe("Compiler", func() {
|
||||
// Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
spec.SetOutputPath(tmpdir)
|
||||
compiler.SetConcurrency(1)
|
||||
|
||||
artifacts, errs := compiler.CompileParallel(false, NewLuetCompilationspecs(spec))
|
||||
artifacts, errs := compiler.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec))
|
||||
Expect(errs).To(BeNil())
|
||||
Expect(len(artifacts)).To(Equal(1))
|
||||
|
||||
for _, artifact := range artifacts {
|
||||
Expect(helpers.Exists(artifact.GetPath())).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.GetPath(), tmpdir, false)).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(artifact.Path)).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.Path, tmpdir, false)).ToNot(HaveOccurred())
|
||||
}
|
||||
Expect(helpers.Exists(spec.Rel("test5"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("marvin"))).To(BeTrue())
|
||||
@@ -312,7 +308,7 @@ var _ = Describe("Compiler", func() {
|
||||
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(1))
|
||||
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), NewDefaultCompilerOptions(), solver.Options{Type: solver.SingleCoreSimple})
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase())
|
||||
|
||||
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
@@ -321,15 +317,14 @@ var _ = Describe("Compiler", func() {
|
||||
// Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
spec.SetOutputPath(tmpdir)
|
||||
compiler.SetConcurrency(1)
|
||||
|
||||
artifacts, errs := compiler.CompileParallel(false, NewLuetCompilationspecs(spec))
|
||||
artifacts, errs := compiler.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec))
|
||||
Expect(errs).To(BeNil())
|
||||
Expect(len(artifacts)).To(Equal(1))
|
||||
|
||||
for _, artifact := range artifacts {
|
||||
Expect(helpers.Exists(artifact.GetPath())).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.GetPath(), tmpdir, false)).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(artifact.Path)).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.Path, tmpdir, false)).ToNot(HaveOccurred())
|
||||
}
|
||||
Expect(helpers.Exists(spec.Rel("test5"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("marvin"))).To(BeTrue())
|
||||
@@ -348,7 +343,7 @@ var _ = Describe("Compiler", func() {
|
||||
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(2))
|
||||
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), NewDefaultCompilerOptions(), solver.Options{Type: solver.SingleCoreSimple})
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase())
|
||||
|
||||
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
@@ -357,14 +352,13 @@ var _ = Describe("Compiler", func() {
|
||||
// Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
spec.SetOutputPath(tmpdir)
|
||||
compiler.SetConcurrency(1)
|
||||
artifacts, errs := compiler.CompileParallel(false, NewLuetCompilationspecs(spec))
|
||||
artifacts, errs := compiler.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec))
|
||||
Expect(errs).To(BeNil())
|
||||
Expect(len(artifacts)).To(Equal(1))
|
||||
|
||||
for _, artifact := range artifacts {
|
||||
Expect(helpers.Exists(artifact.GetPath())).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.GetPath(), tmpdir, false)).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(artifact.Path)).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.Path, tmpdir, false)).ToNot(HaveOccurred())
|
||||
}
|
||||
Expect(helpers.Exists(spec.Rel("marvin"))).ToNot(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("test5"))).To(BeTrue())
|
||||
@@ -382,7 +376,7 @@ var _ = Describe("Compiler", func() {
|
||||
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(2))
|
||||
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), NewDefaultCompilerOptions(), solver.Options{Type: solver.SingleCoreSimple})
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase())
|
||||
|
||||
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
@@ -391,14 +385,13 @@ var _ = Describe("Compiler", func() {
|
||||
// Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
spec.SetOutputPath(tmpdir)
|
||||
compiler.SetConcurrency(1)
|
||||
artifacts, errs := compiler.CompileParallel(false, NewLuetCompilationspecs(spec))
|
||||
artifacts, errs := compiler.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec))
|
||||
Expect(errs).To(BeNil())
|
||||
Expect(len(artifacts)).To(Equal(1))
|
||||
|
||||
for _, artifact := range artifacts {
|
||||
Expect(helpers.Exists(artifact.GetPath())).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.GetPath(), tmpdir, false)).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(artifact.Path)).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.Path, tmpdir, false)).ToNot(HaveOccurred())
|
||||
}
|
||||
Expect(helpers.Exists(spec.Rel("marvin"))).ToNot(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("test5"))).To(BeTrue())
|
||||
@@ -416,7 +409,7 @@ var _ = Describe("Compiler", func() {
|
||||
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(2))
|
||||
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), NewDefaultCompilerOptions(), solver.Options{Type: solver.SingleCoreSimple})
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase())
|
||||
|
||||
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
@@ -425,14 +418,13 @@ var _ = Describe("Compiler", func() {
|
||||
// Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
spec.SetOutputPath(tmpdir)
|
||||
compiler.SetConcurrency(1)
|
||||
artifacts, errs := compiler.CompileParallel(false, NewLuetCompilationspecs(spec))
|
||||
artifacts, errs := compiler.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec))
|
||||
Expect(errs).To(BeNil())
|
||||
Expect(len(artifacts)).To(Equal(1))
|
||||
|
||||
for _, artifact := range artifacts {
|
||||
Expect(helpers.Exists(artifact.GetPath())).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.GetPath(), tmpdir, false)).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(artifact.Path)).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.Path, tmpdir, false)).ToNot(HaveOccurred())
|
||||
}
|
||||
Expect(helpers.Exists(spec.Rel("var/lib/udhcpd"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("marvin"))).To(BeTrue())
|
||||
@@ -454,7 +446,7 @@ var _ = Describe("Compiler", func() {
|
||||
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(3))
|
||||
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), NewDefaultCompilerOptions(), solver.Options{Type: solver.SingleCoreSimple})
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase())
|
||||
|
||||
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "pkgs-checker", Category: "package", Version: "9999"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
@@ -463,15 +455,14 @@ var _ = Describe("Compiler", func() {
|
||||
// Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
spec.SetOutputPath(tmpdir)
|
||||
compiler.SetConcurrency(1)
|
||||
|
||||
artifacts, errs := compiler.CompileParallel(false, NewLuetCompilationspecs(spec))
|
||||
artifacts, errs := compiler.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec))
|
||||
Expect(errs).To(BeNil())
|
||||
Expect(len(artifacts)).To(Equal(1))
|
||||
|
||||
for _, artifact := range artifacts {
|
||||
Expect(helpers.Exists(artifact.GetPath())).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.GetPath(), tmpdir, false)).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(artifact.Path)).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.Path, tmpdir, false)).ToNot(HaveOccurred())
|
||||
}
|
||||
Expect(helpers.Untar(spec.Rel("extra-layer-0.1.package.tar"), tmpdir, false)).ToNot(HaveOccurred())
|
||||
|
||||
@@ -495,7 +486,7 @@ var _ = Describe("Compiler", func() {
|
||||
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(3))
|
||||
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), NewDefaultCompilerOptions(), solver.Options{Type: solver.SingleCoreSimple})
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase())
|
||||
|
||||
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "d", Category: "test", Version: "1.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
@@ -504,16 +495,15 @@ var _ = Describe("Compiler", func() {
|
||||
// Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
spec.SetOutputPath(tmpdir)
|
||||
compiler.SetConcurrency(1)
|
||||
|
||||
artifacts, errs := compiler.CompileParallel(false, NewLuetCompilationspecs(spec))
|
||||
artifacts, errs := compiler.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec))
|
||||
Expect(errs).To(BeNil())
|
||||
Expect(len(artifacts)).To(Equal(1))
|
||||
Expect(len(artifacts[0].GetDependencies())).To(Equal(1))
|
||||
Expect(len(artifacts[0].Dependencies)).To(Equal(1))
|
||||
|
||||
for _, artifact := range artifacts {
|
||||
Expect(helpers.Exists(artifact.GetPath())).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.GetPath(), tmpdir, false)).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(artifact.Path)).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.Path, tmpdir, false)).ToNot(HaveOccurred())
|
||||
}
|
||||
Expect(helpers.Untar(spec.Rel("c-test-1.0.package.tar"), tmpdir, false)).ToNot(HaveOccurred())
|
||||
|
||||
@@ -539,7 +529,7 @@ var _ = Describe("Compiler", func() {
|
||||
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(3))
|
||||
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), NewDefaultCompilerOptions(), solver.Options{Type: solver.SingleCoreSimple})
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase())
|
||||
|
||||
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "d", Category: "test", Version: "1.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
@@ -548,16 +538,15 @@ var _ = Describe("Compiler", func() {
|
||||
// Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
spec.SetOutputPath(tmpdir)
|
||||
compiler.SetConcurrency(1)
|
||||
|
||||
artifacts, errs := compiler.CompileParallel(false, NewLuetCompilationspecs(spec))
|
||||
artifacts, errs := compiler.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec))
|
||||
Expect(errs).To(BeNil())
|
||||
Expect(len(artifacts)).To(Equal(1))
|
||||
Expect(len(artifacts[0].GetDependencies())).To(Equal(1))
|
||||
Expect(len(artifacts[0].Dependencies)).To(Equal(1))
|
||||
|
||||
for _, artifact := range artifacts {
|
||||
Expect(helpers.Exists(artifact.GetPath())).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.GetPath(), tmpdir, false)).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(artifact.Path)).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.Path, tmpdir, false)).ToNot(HaveOccurred())
|
||||
}
|
||||
Expect(helpers.Untar(spec.Rel("c-test-1.0.package.tar"), tmpdir, false)).ToNot(HaveOccurred())
|
||||
|
||||
@@ -581,7 +570,7 @@ var _ = Describe("Compiler", func() {
|
||||
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(3))
|
||||
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), NewDefaultCompilerOptions(), solver.Options{Type: solver.SingleCoreSimple})
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase())
|
||||
|
||||
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "extra", Category: "layer", Version: "0.1"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
@@ -591,13 +580,13 @@ var _ = Describe("Compiler", func() {
|
||||
|
||||
spec.SetOutputPath(tmpdir)
|
||||
|
||||
artifacts, errs := compiler.CompileWithReverseDeps(false, NewLuetCompilationspecs(spec))
|
||||
artifacts, errs := compiler.CompileWithReverseDeps(false, compilerspec.NewLuetCompilationspecs(spec))
|
||||
Expect(errs).To(BeNil())
|
||||
Expect(len(artifacts)).To(Equal(2))
|
||||
|
||||
for _, artifact := range artifacts {
|
||||
Expect(helpers.Exists(artifact.GetPath())).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.GetPath(), tmpdir, false)).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(artifact.Path)).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.Path, tmpdir, false)).ToNot(HaveOccurred())
|
||||
}
|
||||
Expect(helpers.Untar(spec.Rel("extra-layer-0.1.package.tar"), tmpdir, false)).ToNot(HaveOccurred())
|
||||
|
||||
@@ -619,7 +608,7 @@ var _ = Describe("Compiler", func() {
|
||||
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(10))
|
||||
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), NewDefaultCompilerOptions(), solver.Options{Type: solver.SingleCoreSimple})
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase())
|
||||
|
||||
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "vhba", Category: "sys-fs-5.4.2", Version: "20190410"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
@@ -629,13 +618,13 @@ var _ = Describe("Compiler", func() {
|
||||
|
||||
spec.SetOutputPath(tmpdir)
|
||||
|
||||
artifacts, errs := compiler.CompileParallel(false, NewLuetCompilationspecs(spec))
|
||||
artifacts, errs := compiler.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec))
|
||||
Expect(errs).To(BeNil())
|
||||
Expect(len(artifacts)).To(Equal(1))
|
||||
Expect(len(artifacts[0].GetDependencies())).To(Equal(6))
|
||||
Expect(len(artifacts[0].Dependencies)).To(Equal(6))
|
||||
for _, artifact := range artifacts {
|
||||
Expect(helpers.Exists(artifact.GetPath())).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.GetPath(), tmpdir, false)).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(artifact.Path)).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.Path, tmpdir, false)).ToNot(HaveOccurred())
|
||||
}
|
||||
Expect(helpers.Untar(spec.Rel("vhba-sys-fs-5.4.2-20190410.package.tar"), tmpdir, false)).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(spec.Rel("sabayon-build-portage-layer-0.20191126.package.tar"))).To(BeTrue())
|
||||
@@ -658,19 +647,19 @@ var _ = Describe("Compiler", func() {
|
||||
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(4))
|
||||
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), NewDefaultCompilerOptions(), solver.Options{Type: solver.SingleCoreSimple})
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase())
|
||||
|
||||
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
|
||||
|
||||
spec.SetOutputPath(tmpdir)
|
||||
|
||||
artifacts, errs := compiler.CompileWithReverseDeps(false, NewLuetCompilationspecs(spec))
|
||||
artifacts, errs := compiler.CompileWithReverseDeps(false, compilerspec.NewLuetCompilationspecs(spec))
|
||||
Expect(errs).To(BeNil())
|
||||
Expect(len(artifacts)).To(Equal(4))
|
||||
|
||||
for _, artifact := range artifacts {
|
||||
Expect(helpers.Exists(artifact.GetPath())).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.GetPath(), tmpdir, false)).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(artifact.Path)).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.Path, tmpdir, false)).ToNot(HaveOccurred())
|
||||
}
|
||||
|
||||
// A deps on B, so A artifacts are here:
|
||||
@@ -710,7 +699,7 @@ var _ = Describe("Compiler", func() {
|
||||
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(3))
|
||||
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), NewDefaultCompilerOptions(), solver.Options{Type: solver.SingleCoreSimple})
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), options.Concurrency(2))
|
||||
|
||||
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "c", Category: "test", Version: "1.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
@@ -722,17 +711,16 @@ var _ = Describe("Compiler", func() {
|
||||
defer os.RemoveAll(tmpdir) // clean up
|
||||
|
||||
spec.SetOutputPath(tmpdir)
|
||||
compiler.SetConcurrency(2)
|
||||
|
||||
artifacts, errs := compiler.CompileParallel(false, NewLuetCompilationspecs(spec))
|
||||
artifacts, errs := compiler.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec))
|
||||
Expect(errs).To(BeNil())
|
||||
for _, artifact := range artifacts {
|
||||
Expect(helpers.Exists(artifact.GetPath())).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.GetPath(), tmpdir, false)).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(artifact.Path)).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.Path, tmpdir, false)).ToNot(HaveOccurred())
|
||||
|
||||
for _, d := range artifact.GetDependencies() {
|
||||
Expect(helpers.Exists(d.GetPath())).To(BeTrue())
|
||||
Expect(helpers.Untar(d.GetPath(), tmpdir, false)).ToNot(HaveOccurred())
|
||||
for _, d := range artifact.Dependencies {
|
||||
Expect(helpers.Exists(d.Path)).To(BeTrue())
|
||||
Expect(helpers.Untar(d.Path, tmpdir, false)).ToNot(HaveOccurred())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -753,7 +741,7 @@ var _ = Describe("Compiler", func() {
|
||||
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(2))
|
||||
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), NewDefaultCompilerOptions(), solver.Options{Type: solver.SingleCoreSimple})
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase())
|
||||
|
||||
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "runtime", Category: "layer", Version: "0.1"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
@@ -765,12 +753,11 @@ var _ = Describe("Compiler", func() {
|
||||
defer os.RemoveAll(tmpdir) // clean up
|
||||
|
||||
spec.SetOutputPath(tmpdir)
|
||||
compiler.SetConcurrency(1)
|
||||
|
||||
artifacts, errs := compiler.CompileParallel(false, NewLuetCompilationspecs(spec))
|
||||
artifacts, errs := compiler.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec))
|
||||
Expect(errs).To(BeNil())
|
||||
Expect(len(artifacts)).To(Equal(1))
|
||||
Expect(len(artifacts[0].GetDependencies())).To(Equal(1))
|
||||
Expect(len(artifacts[0].Dependencies)).To(Equal(1))
|
||||
Expect(helpers.Untar(spec.Rel("runtime-layer-0.1.package.tar"), tmpdir, false)).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(spec.Rel("bin/busybox"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("var"))).ToNot(BeTrue())
|
||||
@@ -786,7 +773,7 @@ var _ = Describe("Compiler", func() {
|
||||
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(2))
|
||||
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), NewDefaultCompilerOptions(), solver.Options{Type: solver.SingleCoreSimple})
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase())
|
||||
|
||||
spec, err := compiler.FromPackage(&pkg.DefaultPackage{
|
||||
Name: "dironly",
|
||||
@@ -814,12 +801,10 @@ var _ = Describe("Compiler", func() {
|
||||
spec.SetOutputPath(tmpdir)
|
||||
spec2.SetOutputPath(tmpdir2)
|
||||
|
||||
compiler.SetConcurrency(1)
|
||||
|
||||
artifacts, errs := compiler.CompileParallel(false, NewLuetCompilationspecs(spec, spec2))
|
||||
artifacts, errs := compiler.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec, spec2))
|
||||
Expect(errs).To(BeNil())
|
||||
Expect(len(artifacts)).To(Equal(2))
|
||||
Expect(len(artifacts[0].GetDependencies())).To(Equal(0))
|
||||
Expect(len(artifacts[0].Dependencies)).To(Equal(0))
|
||||
|
||||
Expect(helpers.Untar(spec.Rel("dironly-test-1.0.package.tar"), tmpdir, false)).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(spec.Rel("test1"))).To(BeTrue())
|
||||
@@ -841,11 +826,11 @@ var _ = Describe("Compiler", func() {
|
||||
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(2))
|
||||
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), NewDefaultCompilerOptions(), solver.Options{Type: solver.SingleCoreSimple})
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase())
|
||||
|
||||
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "runtime", Category: "layer", Version: "0.1"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
compiler.SetCompressionType(GZip)
|
||||
compiler.Options.CompressionType = compression.GZip
|
||||
Expect(spec.GetPackage().GetPath()).ToNot(Equal(""))
|
||||
|
||||
tmpdir, err := ioutil.TempDir("", "tree")
|
||||
@@ -853,12 +838,11 @@ var _ = Describe("Compiler", func() {
|
||||
defer os.RemoveAll(tmpdir) // clean up
|
||||
|
||||
spec.SetOutputPath(tmpdir)
|
||||
compiler.SetConcurrency(1)
|
||||
|
||||
artifacts, errs := compiler.CompileParallel(false, NewLuetCompilationspecs(spec))
|
||||
artifacts, errs := compiler.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec))
|
||||
Expect(errs).To(BeNil())
|
||||
Expect(len(artifacts)).To(Equal(1))
|
||||
Expect(len(artifacts[0].GetDependencies())).To(Equal(1))
|
||||
Expect(len(artifacts[0].Dependencies)).To(Equal(1))
|
||||
Expect(helpers.Exists(spec.Rel("runtime-layer-0.1.package.tar.gz"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("runtime-layer-0.1.package.tar"))).To(BeFalse())
|
||||
Expect(artifacts[0].Unpack(tmpdir, false)).ToNot(HaveOccurred())
|
||||
@@ -877,7 +861,7 @@ var _ = Describe("Compiler", func() {
|
||||
err := generalRecipe.Load("../../tests/fixtures/includeimage")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(2))
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), NewDefaultCompilerOptions(), solver.Options{Type: solver.SingleCoreSimple})
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase())
|
||||
|
||||
specs, err := compiler.FromDatabase(generalRecipe.GetDatabase(), true, "")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
@@ -896,11 +880,11 @@ var _ = Describe("Compiler", func() {
|
||||
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(2))
|
||||
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), NewDefaultCompilerOptions(), solver.Options{Type: solver.SingleCoreSimple})
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase())
|
||||
|
||||
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "runtime", Category: "layer", Version: "0.1"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
compiler.SetCompressionType(GZip)
|
||||
compiler.Options.CompressionType = compression.GZip
|
||||
Expect(spec.GetPackage().GetPath()).ToNot(Equal(""))
|
||||
|
||||
tmpdir, err := ioutil.TempDir("", "tree")
|
||||
@@ -908,20 +892,19 @@ var _ = Describe("Compiler", func() {
|
||||
defer os.RemoveAll(tmpdir) // clean up
|
||||
|
||||
spec.SetOutputPath(tmpdir)
|
||||
compiler.SetConcurrency(1)
|
||||
|
||||
artifacts, errs := compiler.CompileParallel(false, NewLuetCompilationspecs(spec))
|
||||
artifacts, errs := compiler.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec))
|
||||
Expect(errs).To(BeNil())
|
||||
Expect(len(artifacts)).To(Equal(1))
|
||||
Expect(len(artifacts[0].GetDependencies())).To(Equal(1))
|
||||
Expect(artifacts[0].GetFiles()).To(ContainElement("bin/busybox"))
|
||||
Expect(len(artifacts[0].Dependencies)).To(Equal(1))
|
||||
Expect(artifacts[0].Files).To(ContainElement("bin/busybox"))
|
||||
|
||||
Expect(helpers.Exists(spec.Rel("runtime-layer-0.1.metadata.yaml"))).To(BeTrue())
|
||||
|
||||
art, err := LoadArtifactFromYaml(spec)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
files := art.GetFiles()
|
||||
files := art.Files
|
||||
Expect(files).To(ContainElement("bin/busybox"))
|
||||
})
|
||||
})
|
||||
|
@@ -1,198 +0,0 @@
|
||||
// Copyright © 2019 Ettore Di Giacinto <mudler@gentoo.org>
|
||||
//
|
||||
// This program is free software; you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation; either version 2 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program; if not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package compiler
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
|
||||
"github.com/mudler/luet/pkg/config"
|
||||
pkg "github.com/mudler/luet/pkg/package"
|
||||
"github.com/mudler/luet/pkg/solver"
|
||||
)
|
||||
|
||||
type Compiler interface {
|
||||
Compile(bool, CompilationSpec) (Artifact, error)
|
||||
CompileParallel(keepPermissions bool, ps CompilationSpecs) ([]Artifact, []error)
|
||||
CompileWithReverseDeps(keepPermissions bool, ps CompilationSpecs) ([]Artifact, []error)
|
||||
ComputeDepTree(p CompilationSpec) (solver.PackagesAssertions, error)
|
||||
ComputeMinimumCompilableSet(p ...CompilationSpec) ([]CompilationSpec, error)
|
||||
SetConcurrency(i int)
|
||||
FromPackage(pkg.Package) (CompilationSpec, error)
|
||||
FromDatabase(db pkg.PackageDatabase, minimum bool, dst string) ([]CompilationSpec, error)
|
||||
SetBackend(CompilerBackend)
|
||||
GetBackend() CompilerBackend
|
||||
SetCompressionType(t CompressionImplementation)
|
||||
}
|
||||
|
||||
type CompilerBackendOptions struct {
|
||||
ImageName string
|
||||
SourcePath string
|
||||
DockerFileName string
|
||||
Destination string
|
||||
Context string
|
||||
}
|
||||
|
||||
type CompilerOptions struct {
|
||||
ImageRepository string
|
||||
PullFirst, KeepImg, Push bool
|
||||
Concurrency int
|
||||
CompressionType CompressionImplementation
|
||||
KeepImageExport bool
|
||||
|
||||
Wait bool
|
||||
OnlyDeps bool
|
||||
NoDeps bool
|
||||
SolverOptions config.LuetSolverOptions
|
||||
BuildValuesFile string
|
||||
|
||||
PackageTargetOnly bool
|
||||
}
|
||||
|
||||
func NewDefaultCompilerOptions() *CompilerOptions {
|
||||
return &CompilerOptions{
|
||||
ImageRepository: "luet/cache",
|
||||
PullFirst: false,
|
||||
Push: false,
|
||||
CompressionType: None,
|
||||
KeepImg: true,
|
||||
Concurrency: runtime.NumCPU(),
|
||||
OnlyDeps: false,
|
||||
NoDeps: false,
|
||||
}
|
||||
}
|
||||
|
||||
type CompilerBackend interface {
|
||||
BuildImage(CompilerBackendOptions) error
|
||||
ExportImage(CompilerBackendOptions) error
|
||||
RemoveImage(CompilerBackendOptions) error
|
||||
Changes(fromImage, toImage CompilerBackendOptions) ([]ArtifactLayer, error)
|
||||
ImageDefinitionToTar(CompilerBackendOptions) error
|
||||
ExtractRootfs(opts CompilerBackendOptions, keepPerms bool) error
|
||||
|
||||
CopyImage(string, string) error
|
||||
DownloadImage(opts CompilerBackendOptions) error
|
||||
|
||||
Push(opts CompilerBackendOptions) error
|
||||
ImageAvailable(string) bool
|
||||
|
||||
ImageExists(string) bool
|
||||
}
|
||||
|
||||
type Artifact interface {
|
||||
GetPath() string
|
||||
SetPath(string)
|
||||
GetDependencies() []Artifact
|
||||
SetDependencies(d []Artifact)
|
||||
GetSourceAssertion() solver.PackagesAssertions
|
||||
SetSourceAssertion(as solver.PackagesAssertions)
|
||||
|
||||
SetCompileSpec(as CompilationSpec)
|
||||
GetCompileSpec() CompilationSpec
|
||||
WriteYaml(dst string) error
|
||||
Unpack(dst string, keepPerms bool) error
|
||||
Compress(src string, concurrency int) error
|
||||
SetCompressionType(t CompressionImplementation)
|
||||
FileList() ([]string, error)
|
||||
Hash() error
|
||||
Verify() error
|
||||
|
||||
SetFiles(f []string)
|
||||
GetFiles() []string
|
||||
GetFileName() string
|
||||
|
||||
GetChecksums() Checksums
|
||||
SetChecksums(c Checksums)
|
||||
|
||||
GenerateFinalImage(string, CompilerBackend, bool) (CompilerBackendOptions, error)
|
||||
GetUncompressedName() string
|
||||
}
|
||||
|
||||
type ArtifactNode struct {
|
||||
Name string `json:"Name"`
|
||||
Size int `json:"Size"`
|
||||
}
|
||||
type ArtifactDiffs struct {
|
||||
Additions []ArtifactNode `json:"Adds"`
|
||||
Deletions []ArtifactNode `json:"Dels"`
|
||||
Changes []ArtifactNode `json:"Mods"`
|
||||
}
|
||||
type ArtifactLayer struct {
|
||||
FromImage string `json:"Image1"`
|
||||
ToImage string `json:"Image2"`
|
||||
Diffs ArtifactDiffs `json:"Diff"`
|
||||
}
|
||||
type ArtifactLayerSummary struct {
|
||||
FromImage string `json:"image1"`
|
||||
ToImage string `json:"image2"`
|
||||
AddFiles int `json:"add_files"`
|
||||
AddSizes int64 `json:"add_sizes"`
|
||||
DelFiles int `json:"del_files"`
|
||||
DelSizes int64 `json:"del_sizes"`
|
||||
ChangeFiles int `json:"change_files"`
|
||||
ChangeSizes int64 `json:"change_sizes"`
|
||||
}
|
||||
type ArtifactLayersSummary struct {
|
||||
Layers []ArtifactLayerSummary `json:"summary"`
|
||||
}
|
||||
|
||||
// CompilationSpec represent a compilation specification derived from a package
|
||||
type CompilationSpec interface {
|
||||
ImageUnpack() bool // tells if the definition is just an image
|
||||
GetIncludes() []string
|
||||
GetExcludes() []string
|
||||
|
||||
RenderBuildImage() (string, error)
|
||||
WriteBuildImageDefinition(string) error
|
||||
|
||||
RenderStepImage(image string) (string, error)
|
||||
WriteStepImageDefinition(fromimage, path string) error
|
||||
|
||||
GetPackage() pkg.Package
|
||||
BuildSteps() []string
|
||||
|
||||
GetSeedImage() string
|
||||
SetSeedImage(string)
|
||||
|
||||
GetImage() string
|
||||
SetImage(string)
|
||||
|
||||
SetOutputPath(string)
|
||||
GetOutputPath() string
|
||||
Rel(string) string
|
||||
|
||||
GetPreBuildSteps() []string
|
||||
|
||||
GetSourceAssertion() solver.PackagesAssertions
|
||||
SetSourceAssertion(as solver.PackagesAssertions)
|
||||
|
||||
GetRetrieve() []string
|
||||
CopyRetrieves(dest string) error
|
||||
|
||||
SetPackageDir(string)
|
||||
GetPackageDir() string
|
||||
|
||||
EmptyPackage() bool
|
||||
UnpackedPackage() bool
|
||||
HasImageSource() bool
|
||||
}
|
||||
|
||||
type CompilationSpecs interface {
|
||||
Unique() CompilationSpecs
|
||||
Len() int
|
||||
All() []CompilationSpec
|
||||
Add(CompilationSpec)
|
||||
Remove(s CompilationSpecs) CompilationSpecs
|
||||
}
|
@@ -13,7 +13,7 @@
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program; if not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package compiler
|
||||
package artifact
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
@@ -36,6 +36,9 @@ import (
|
||||
"sync"
|
||||
|
||||
bus "github.com/mudler/luet/pkg/bus"
|
||||
backend "github.com/mudler/luet/pkg/compiler/backend"
|
||||
compression "github.com/mudler/luet/pkg/compiler/types/compression"
|
||||
compilerspec "github.com/mudler/luet/pkg/compiler/types/spec"
|
||||
. "github.com/mudler/luet/pkg/config"
|
||||
"github.com/mudler/luet/pkg/helpers"
|
||||
. "github.com/mudler/luet/pkg/logger"
|
||||
@@ -45,54 +48,30 @@ import (
|
||||
yaml "gopkg.in/yaml.v2"
|
||||
)
|
||||
|
||||
type CompressionImplementation string
|
||||
|
||||
const (
|
||||
None CompressionImplementation = "none" // e.g. tar for standard packages
|
||||
GZip CompressionImplementation = "gzip"
|
||||
Zstandard CompressionImplementation = "zstd"
|
||||
)
|
||||
|
||||
type ArtifactIndex []Artifact
|
||||
|
||||
func (i ArtifactIndex) CleanPath() ArtifactIndex {
|
||||
newIndex := ArtifactIndex{}
|
||||
for _, n := range i {
|
||||
art := n.(*PackageArtifact)
|
||||
// FIXME: This is a dup and makes difficult to add attributes to artifacts
|
||||
newIndex = append(newIndex, &PackageArtifact{
|
||||
Path: path.Base(n.GetPath()),
|
||||
SourceAssertion: art.SourceAssertion,
|
||||
CompileSpec: art.CompileSpec,
|
||||
Dependencies: art.Dependencies,
|
||||
CompressionType: art.CompressionType,
|
||||
Checksums: art.Checksums,
|
||||
Files: art.Files,
|
||||
})
|
||||
}
|
||||
return newIndex
|
||||
//Update if exists, otherwise just create
|
||||
}
|
||||
|
||||
// When compiling, we write also a fingerprint.metadata.yaml file with PackageArtifact. In this way we can have another command to create the repository
|
||||
// which will consist in just of an repository.yaml which is just the repository structure with the list of package artifact.
|
||||
// In this way a generic client can fetch the packages and, after unpacking the tree, performing queries to install packages.
|
||||
type PackageArtifact struct {
|
||||
Path string `json:"path"`
|
||||
|
||||
Dependencies []*PackageArtifact `json:"dependencies"`
|
||||
CompileSpec *LuetCompilationSpec `json:"compilationspec"`
|
||||
Checksums Checksums `json:"checksums"`
|
||||
SourceAssertion solver.PackagesAssertions `json:"-"`
|
||||
CompressionType CompressionImplementation `json:"compressiontype"`
|
||||
Files []string `json:"files"`
|
||||
Dependencies []*PackageArtifact `json:"dependencies"`
|
||||
CompileSpec *compilerspec.LuetCompilationSpec `json:"compilationspec"`
|
||||
Checksums Checksums `json:"checksums"`
|
||||
SourceAssertion solver.PackagesAssertions `json:"-"`
|
||||
CompressionType compression.Implementation `json:"compressiontype"`
|
||||
Files []string `json:"files"`
|
||||
}
|
||||
|
||||
func NewPackageArtifact(path string) Artifact {
|
||||
return &PackageArtifact{Path: path, Dependencies: []*PackageArtifact{}, Checksums: Checksums{}, CompressionType: None}
|
||||
func (p *PackageArtifact) ShallowCopy() *PackageArtifact {
|
||||
copy := *p
|
||||
return ©
|
||||
}
|
||||
|
||||
func NewPackageArtifactFromYaml(data []byte) (Artifact, error) {
|
||||
func NewPackageArtifact(path string) *PackageArtifact {
|
||||
return &PackageArtifact{Path: path, Dependencies: []*PackageArtifact{}, Checksums: Checksums{}, CompressionType: compression.None}
|
||||
}
|
||||
|
||||
func NewPackageArtifactFromYaml(data []byte) (*PackageArtifact, error) {
|
||||
p := &PackageArtifact{Checksums: Checksums{}}
|
||||
err := yaml.Unmarshal(data, &p)
|
||||
if err != nil {
|
||||
@@ -102,42 +81,6 @@ func NewPackageArtifactFromYaml(data []byte) (Artifact, error) {
|
||||
return p, err
|
||||
}
|
||||
|
||||
func LoadArtifactFromYaml(spec CompilationSpec) (Artifact, error) {
|
||||
|
||||
metaFile := spec.GetPackage().GetFingerPrint() + ".metadata.yaml"
|
||||
dat, err := ioutil.ReadFile(spec.Rel(metaFile))
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Error reading file "+metaFile)
|
||||
}
|
||||
art, err := NewPackageArtifactFromYaml(dat)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Error writing file "+metaFile)
|
||||
}
|
||||
// It is relative, set it back to abs
|
||||
art.SetPath(spec.Rel(art.GetPath()))
|
||||
return art, nil
|
||||
}
|
||||
|
||||
func (a *PackageArtifact) SetCompressionType(t CompressionImplementation) {
|
||||
a.CompressionType = t
|
||||
}
|
||||
|
||||
func (a *PackageArtifact) GetChecksums() Checksums {
|
||||
return a.Checksums
|
||||
}
|
||||
|
||||
func (a *PackageArtifact) SetChecksums(c Checksums) {
|
||||
a.Checksums = c
|
||||
}
|
||||
|
||||
func (a *PackageArtifact) SetFiles(f []string) {
|
||||
a.Files = f
|
||||
}
|
||||
|
||||
func (a *PackageArtifact) GetFiles() []string {
|
||||
return a.Files
|
||||
}
|
||||
|
||||
func (a *PackageArtifact) Hash() error {
|
||||
return a.Checksums.Generate(a)
|
||||
}
|
||||
@@ -181,8 +124,8 @@ func (a *PackageArtifact) WriteYaml(dst string) error {
|
||||
}
|
||||
//p := a.CompileSpec.GetPackage().GetPath()
|
||||
|
||||
mangle.GetCompileSpec().GetPackage().SetPath("")
|
||||
for _, ass := range mangle.GetCompileSpec().GetSourceAssertion() {
|
||||
mangle.CompileSpec.GetPackage().SetPath("")
|
||||
for _, ass := range mangle.CompileSpec.GetSourceAssertion() {
|
||||
ass.Package.SetPath("")
|
||||
}
|
||||
|
||||
@@ -191,7 +134,7 @@ func (a *PackageArtifact) WriteYaml(dst string) error {
|
||||
return errors.Wrap(err, "While marshalling for PackageArtifact YAML")
|
||||
}
|
||||
|
||||
err = ioutil.WriteFile(filepath.Join(dst, a.GetCompileSpec().GetPackage().GetFingerPrint()+".metadata.yaml"), data, os.ModePerm)
|
||||
err = ioutil.WriteFile(filepath.Join(dst, a.CompileSpec.GetPackage().GetMetadataFilePath()), data, os.ModePerm)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "While writing PackageArtifact YAML")
|
||||
}
|
||||
@@ -201,66 +144,32 @@ func (a *PackageArtifact) WriteYaml(dst string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a *PackageArtifact) GetSourceAssertion() solver.PackagesAssertions {
|
||||
return a.SourceAssertion
|
||||
}
|
||||
|
||||
func (a *PackageArtifact) SetCompileSpec(as CompilationSpec) {
|
||||
a.CompileSpec = as.(*LuetCompilationSpec)
|
||||
}
|
||||
|
||||
func (a *PackageArtifact) GetCompileSpec() CompilationSpec {
|
||||
return a.CompileSpec
|
||||
}
|
||||
|
||||
func (a *PackageArtifact) SetSourceAssertion(as solver.PackagesAssertions) {
|
||||
a.SourceAssertion = as
|
||||
}
|
||||
|
||||
func (a *PackageArtifact) GetDependencies() []Artifact {
|
||||
ret := []Artifact{}
|
||||
for _, d := range a.Dependencies {
|
||||
ret = append(ret, d)
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
func (a *PackageArtifact) SetDependencies(d []Artifact) {
|
||||
ret := []*PackageArtifact{}
|
||||
for _, dd := range d {
|
||||
ret = append(ret, dd.(*PackageArtifact))
|
||||
}
|
||||
a.Dependencies = ret
|
||||
}
|
||||
|
||||
func (a *PackageArtifact) GetPath() string {
|
||||
return a.Path
|
||||
}
|
||||
|
||||
func (a *PackageArtifact) GetFileName() string {
|
||||
return path.Base(a.GetPath())
|
||||
}
|
||||
|
||||
func (a *PackageArtifact) SetPath(p string) {
|
||||
a.Path = p
|
||||
return path.Base(a.Path)
|
||||
}
|
||||
|
||||
func (a *PackageArtifact) genDockerfile() string {
|
||||
return `
|
||||
FROM scratch
|
||||
COPY * /`
|
||||
COPY . /`
|
||||
}
|
||||
|
||||
// CreateArtifactForFile creates a new artifact from the given file
|
||||
func CreateArtifactForFile(s string, opts ...func(*PackageArtifact)) (*PackageArtifact, error) {
|
||||
|
||||
if _, err := os.Stat(s); os.IsNotExist(err) {
|
||||
return nil, errors.Wrap(err, "artifact path doesn't exist")
|
||||
}
|
||||
fileName := path.Base(s)
|
||||
archive, err := LuetCfg.GetSystem().TempDir("archive")
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error met while creating tempdir for "+s)
|
||||
}
|
||||
defer os.RemoveAll(archive) // clean up
|
||||
helpers.CopyFile(s, filepath.Join(archive, fileName))
|
||||
dst := filepath.Join(archive, fileName)
|
||||
if err := helpers.CopyFile(s, dst); err != nil {
|
||||
return nil, errors.Wrapf(err, "error while copying %s to %s", s, dst)
|
||||
}
|
||||
|
||||
artifact, err := LuetCfg.GetSystem().TempDir("artifact")
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error met while creating tempdir for "+s)
|
||||
@@ -274,9 +183,13 @@ func CreateArtifactForFile(s string, opts ...func(*PackageArtifact)) (*PackageAr
|
||||
return a, a.Compress(archive, 1)
|
||||
}
|
||||
|
||||
type ImageBuilder interface {
|
||||
BuildImage(backend.Options) error
|
||||
}
|
||||
|
||||
// GenerateFinalImage takes an artifact and builds a Docker image with its content
|
||||
func (a *PackageArtifact) GenerateFinalImage(imageName string, b CompilerBackend, keepPerms bool) (CompilerBackendOptions, error) {
|
||||
builderOpts := CompilerBackendOptions{}
|
||||
func (a *PackageArtifact) GenerateFinalImage(imageName string, b ImageBuilder, keepPerms bool) (backend.Options, error) {
|
||||
builderOpts := backend.Options{}
|
||||
archive, err := LuetCfg.GetSystem().TempDir("archive")
|
||||
if err != nil {
|
||||
return builderOpts, errors.Wrap(err, "error met while creating tempdir for "+a.Path)
|
||||
@@ -290,16 +203,28 @@ func (a *PackageArtifact) GenerateFinalImage(imageName string, b CompilerBackend
|
||||
return builderOpts, errors.Wrap(err, "error met while creating tempdir for "+a.Path)
|
||||
}
|
||||
|
||||
if err := a.Unpack(uncompressedFiles, keepPerms); err != nil {
|
||||
return builderOpts, errors.Wrap(err, "error met while uncompressing artifact "+a.Path)
|
||||
}
|
||||
|
||||
empty, err := helpers.DirectoryIsEmpty(uncompressedFiles)
|
||||
if err != nil {
|
||||
return builderOpts, errors.Wrap(err, "error met while checking if directory is empty "+uncompressedFiles)
|
||||
}
|
||||
|
||||
// See https://github.com/moby/moby/issues/38039.
|
||||
// We can't generate FROM scratch empty images. Docker will refuse to export them
|
||||
// workaround: Inject a .virtual empty file
|
||||
if empty {
|
||||
helpers.Touch(filepath.Join(uncompressedFiles, ".virtual"))
|
||||
}
|
||||
|
||||
data := a.genDockerfile()
|
||||
if err := ioutil.WriteFile(dockerFile, []byte(data), 0644); err != nil {
|
||||
return builderOpts, errors.Wrap(err, "error met while rendering artifact dockerfile "+a.Path)
|
||||
}
|
||||
|
||||
if err := a.Unpack(uncompressedFiles, keepPerms); err != nil {
|
||||
return builderOpts, errors.Wrap(err, "error met while uncompressing artifact "+a.Path)
|
||||
}
|
||||
|
||||
builderOpts = CompilerBackendOptions{
|
||||
builderOpts = backend.Options{
|
||||
ImageName: imageName,
|
||||
SourcePath: archive,
|
||||
DockerFileName: dockerFile,
|
||||
@@ -314,7 +239,7 @@ func (a *PackageArtifact) GenerateFinalImage(imageName string, b CompilerBackend
|
||||
func (a *PackageArtifact) Compress(src string, concurrency int) error {
|
||||
switch a.CompressionType {
|
||||
|
||||
case Zstandard:
|
||||
case compression.Zstandard:
|
||||
err := helpers.Tar(src, a.Path)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -352,7 +277,7 @@ func (a *PackageArtifact) Compress(src string, concurrency int) error {
|
||||
|
||||
a.Path = zstdFile
|
||||
return nil
|
||||
case GZip:
|
||||
case compression.GZip:
|
||||
err := helpers.Tar(src, a.Path)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -373,7 +298,7 @@ func (a *PackageArtifact) Compress(src string, concurrency int) error {
|
||||
}
|
||||
// Create gzip writer.
|
||||
w := gzip.NewWriter(dst)
|
||||
w.SetConcurrency(concurrency, 10)
|
||||
w.SetConcurrency(1<<20, concurrency)
|
||||
defer w.Close()
|
||||
defer dst.Close()
|
||||
_, err = io.Copy(w, bufferedReader)
|
||||
@@ -397,10 +322,10 @@ func (a *PackageArtifact) Compress(src string, concurrency int) error {
|
||||
|
||||
func (a *PackageArtifact) getCompressedName() string {
|
||||
switch a.CompressionType {
|
||||
case Zstandard:
|
||||
case compression.Zstandard:
|
||||
return a.Path + ".zst"
|
||||
|
||||
case GZip:
|
||||
case compression.GZip:
|
||||
return a.Path + ".gz"
|
||||
}
|
||||
return a.Path
|
||||
@@ -409,7 +334,7 @@ func (a *PackageArtifact) getCompressedName() string {
|
||||
// GetUncompressedName returns the artifact path without the extension suffix
|
||||
func (a *PackageArtifact) GetUncompressedName() string {
|
||||
switch a.CompressionType {
|
||||
case Zstandard, GZip:
|
||||
case compression.Zstandard, compression.GZip:
|
||||
return strings.TrimSuffix(a.Path, filepath.Ext(a.Path))
|
||||
}
|
||||
return a.Path
|
||||
@@ -497,13 +422,13 @@ func (a *PackageArtifact) Unpack(dst string, keepPerms bool) error {
|
||||
tarModifier := helpers.NewTarModifierWrapper(dst, tarModifierWrapperFunc)
|
||||
|
||||
switch a.CompressionType {
|
||||
case Zstandard:
|
||||
case compression.Zstandard:
|
||||
// Create the uncompressed archive
|
||||
archive, err := os.Create(a.GetPath() + ".uncompressed")
|
||||
archive, err := os.Create(a.Path + ".uncompressed")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer os.RemoveAll(a.GetPath() + ".uncompressed")
|
||||
defer os.RemoveAll(a.Path + ".uncompressed")
|
||||
defer archive.Close()
|
||||
|
||||
original, err := os.Open(a.Path)
|
||||
@@ -522,22 +447,22 @@ func (a *PackageArtifact) Unpack(dst string, keepPerms bool) error {
|
||||
|
||||
_, err = io.Copy(archive, d)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Cannot copy to "+a.GetPath()+".uncompressed")
|
||||
return errors.Wrap(err, "Cannot copy to "+a.Path+".uncompressed")
|
||||
}
|
||||
|
||||
err = helpers.UntarProtect(a.GetPath()+".uncompressed", dst,
|
||||
err = helpers.UntarProtect(a.Path+".uncompressed", dst,
|
||||
LuetCfg.GetGeneral().SameOwner, protectedFiles, tarModifier)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
case GZip:
|
||||
case compression.GZip:
|
||||
// Create the uncompressed archive
|
||||
archive, err := os.Create(a.GetPath() + ".uncompressed")
|
||||
archive, err := os.Create(a.Path + ".uncompressed")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer os.RemoveAll(a.GetPath() + ".uncompressed")
|
||||
defer os.RemoveAll(a.Path + ".uncompressed")
|
||||
defer archive.Close()
|
||||
|
||||
original, err := os.Open(a.Path)
|
||||
@@ -555,10 +480,10 @@ func (a *PackageArtifact) Unpack(dst string, keepPerms bool) error {
|
||||
|
||||
_, err = io.Copy(archive, r)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Cannot copy to "+a.GetPath()+".uncompressed")
|
||||
return errors.Wrap(err, "Cannot copy to "+a.Path+".uncompressed")
|
||||
}
|
||||
|
||||
err = helpers.UntarProtect(a.GetPath()+".uncompressed", dst,
|
||||
err = helpers.UntarProtect(a.Path+".uncompressed", dst,
|
||||
LuetCfg.GetGeneral().SameOwner, protectedFiles, tarModifier)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -566,7 +491,7 @@ func (a *PackageArtifact) Unpack(dst string, keepPerms bool) error {
|
||||
return nil
|
||||
// Defaults to tar only (covers when "none" is supplied)
|
||||
default:
|
||||
return helpers.UntarProtect(a.GetPath(), dst, LuetCfg.GetGeneral().SameOwner,
|
||||
return helpers.UntarProtect(a.Path, dst, LuetCfg.GetGeneral().SameOwner,
|
||||
protectedFiles, tarModifier)
|
||||
}
|
||||
return errors.New("Compression type must be supplied")
|
||||
@@ -576,12 +501,12 @@ func (a *PackageArtifact) Unpack(dst string, keepPerms bool) error {
|
||||
func (a *PackageArtifact) FileList() ([]string, error) {
|
||||
var tr *tar.Reader
|
||||
switch a.CompressionType {
|
||||
case Zstandard:
|
||||
archive, err := os.Create(a.GetPath() + ".uncompressed")
|
||||
case compression.Zstandard:
|
||||
archive, err := os.Create(a.Path + ".uncompressed")
|
||||
if err != nil {
|
||||
return []string{}, err
|
||||
}
|
||||
defer os.RemoveAll(a.GetPath() + ".uncompressed")
|
||||
defer os.RemoveAll(a.Path + ".uncompressed")
|
||||
defer archive.Close()
|
||||
|
||||
original, err := os.Open(a.Path)
|
||||
@@ -597,13 +522,13 @@ func (a *PackageArtifact) FileList() ([]string, error) {
|
||||
}
|
||||
defer r.Close()
|
||||
tr = tar.NewReader(r)
|
||||
case GZip:
|
||||
case compression.GZip:
|
||||
// Create the uncompressed archive
|
||||
archive, err := os.Create(a.GetPath() + ".uncompressed")
|
||||
archive, err := os.Create(a.Path + ".uncompressed")
|
||||
if err != nil {
|
||||
return []string{}, err
|
||||
}
|
||||
defer os.RemoveAll(a.GetPath() + ".uncompressed")
|
||||
defer os.RemoveAll(a.Path + ".uncompressed")
|
||||
defer archive.Close()
|
||||
|
||||
original, err := os.Open(a.Path)
|
||||
@@ -622,7 +547,7 @@ func (a *PackageArtifact) FileList() ([]string, error) {
|
||||
|
||||
// Defaults to tar only (covers when "none" is supplied)
|
||||
default:
|
||||
tarFile, err := os.Open(a.GetPath())
|
||||
tarFile, err := os.Open(a.Path)
|
||||
if err != nil {
|
||||
return []string{}, errors.Wrap(err, "Could not open package archive")
|
||||
}
|
||||
@@ -704,8 +629,37 @@ func worker(i int, wg *sync.WaitGroup, s <-chan CopyJob) {
|
||||
}
|
||||
}
|
||||
|
||||
func compileRegexes(regexes []string) []*regexp.Regexp {
|
||||
var result []*regexp.Regexp
|
||||
for _, i := range regexes {
|
||||
r, e := regexp.Compile(i)
|
||||
if e != nil {
|
||||
Warning("Failed compiling regex:", e)
|
||||
continue
|
||||
}
|
||||
result = append(result, r)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
type ArtifactNode struct {
|
||||
Name string `json:"Name"`
|
||||
Size int `json:"Size"`
|
||||
}
|
||||
type ArtifactDiffs struct {
|
||||
Additions []ArtifactNode `json:"Adds"`
|
||||
Deletions []ArtifactNode `json:"Dels"`
|
||||
Changes []ArtifactNode `json:"Mods"`
|
||||
}
|
||||
|
||||
type ArtifactLayer struct {
|
||||
FromImage string `json:"Image1"`
|
||||
ToImage string `json:"Image2"`
|
||||
Diffs ArtifactDiffs `json:"Diff"`
|
||||
}
|
||||
|
||||
// ExtractArtifactFromDelta extracts deltas from ArtifactLayer from an image in tar format
|
||||
func ExtractArtifactFromDelta(src, dst string, layers []ArtifactLayer, concurrency int, keepPerms bool, includes []string, excludes []string, t CompressionImplementation) (Artifact, error) {
|
||||
func ExtractArtifactFromDelta(src, dst string, layers []ArtifactLayer, concurrency int, keepPerms bool, includes []string, excludes []string, t compression.Implementation) (*PackageArtifact, error) {
|
||||
|
||||
archive, err := LuetCfg.GetSystem().TempDir("archive")
|
||||
if err != nil {
|
||||
@@ -737,15 +691,7 @@ func ExtractArtifactFromDelta(src, dst string, layers []ArtifactLayer, concurren
|
||||
// Handle includes in spec. If specified they filter what gets in the package
|
||||
|
||||
if len(includes) > 0 && len(excludes) == 0 {
|
||||
var includeRegexp []*regexp.Regexp
|
||||
for _, i := range includes {
|
||||
r, e := regexp.Compile(i)
|
||||
if e != nil {
|
||||
Warning("Failed compiling regex:", e)
|
||||
continue
|
||||
}
|
||||
includeRegexp = append(includeRegexp, r)
|
||||
}
|
||||
includeRegexp := compileRegexes(includes)
|
||||
for _, l := range layers {
|
||||
// Consider d.Additions (and d.Changes? - warn at least) only
|
||||
ADDS:
|
||||
@@ -766,15 +712,7 @@ func ExtractArtifactFromDelta(src, dst string, layers []ArtifactLayer, concurren
|
||||
}
|
||||
|
||||
} else if len(includes) == 0 && len(excludes) != 0 {
|
||||
var excludeRegexp []*regexp.Regexp
|
||||
for _, i := range excludes {
|
||||
r, e := regexp.Compile(i)
|
||||
if e != nil {
|
||||
Warning("Failed compiling regex:", e)
|
||||
continue
|
||||
}
|
||||
excludeRegexp = append(excludeRegexp, r)
|
||||
}
|
||||
excludeRegexp := compileRegexes(excludes)
|
||||
for _, l := range layers {
|
||||
// Consider d.Additions (and d.Changes? - warn at least) only
|
||||
ADD:
|
||||
@@ -795,25 +733,8 @@ func ExtractArtifactFromDelta(src, dst string, layers []ArtifactLayer, concurren
|
||||
}
|
||||
|
||||
} else if len(includes) != 0 && len(excludes) != 0 {
|
||||
|
||||
var includeRegexp []*regexp.Regexp
|
||||
for _, i := range includes {
|
||||
r, e := regexp.Compile(i)
|
||||
if e != nil {
|
||||
Warning("Failed compiling regex:", e)
|
||||
continue
|
||||
}
|
||||
includeRegexp = append(includeRegexp, r)
|
||||
}
|
||||
var excludeRegexp []*regexp.Regexp
|
||||
for _, i := range excludes {
|
||||
r, e := regexp.Compile(i)
|
||||
if e != nil {
|
||||
Warning("Failed compiling regex:", e)
|
||||
continue
|
||||
}
|
||||
excludeRegexp = append(excludeRegexp, r)
|
||||
}
|
||||
includeRegexp := compileRegexes(includes)
|
||||
excludeRegexp := compileRegexes(excludes)
|
||||
|
||||
for _, l := range layers {
|
||||
// Consider d.Additions (and d.Changes? - warn at least) only
|
||||
@@ -860,45 +781,10 @@ func ExtractArtifactFromDelta(src, dst string, layers []ArtifactLayer, concurren
|
||||
wg.Wait()
|
||||
|
||||
a := NewPackageArtifact(dst)
|
||||
a.SetCompressionType(t)
|
||||
a.CompressionType = t
|
||||
err = a.Compress(archive, concurrency)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Error met while creating package archive")
|
||||
}
|
||||
return a, nil
|
||||
}
|
||||
|
||||
func ComputeArtifactLayerSummary(diffs []ArtifactLayer) ArtifactLayersSummary {
|
||||
|
||||
ans := ArtifactLayersSummary{
|
||||
Layers: make([]ArtifactLayerSummary, 0),
|
||||
}
|
||||
|
||||
for _, layer := range diffs {
|
||||
sum := ArtifactLayerSummary{
|
||||
FromImage: layer.FromImage,
|
||||
ToImage: layer.ToImage,
|
||||
AddFiles: 0,
|
||||
AddSizes: 0,
|
||||
DelFiles: 0,
|
||||
DelSizes: 0,
|
||||
ChangeFiles: 0,
|
||||
ChangeSizes: 0,
|
||||
}
|
||||
for _, a := range layer.Diffs.Additions {
|
||||
sum.AddFiles++
|
||||
sum.AddSizes += int64(a.Size)
|
||||
}
|
||||
for _, d := range layer.Diffs.Deletions {
|
||||
sum.DelFiles++
|
||||
sum.DelSizes += int64(d.Size)
|
||||
}
|
||||
for _, c := range layer.Diffs.Changes {
|
||||
sum.ChangeFiles++
|
||||
sum.ChangeSizes += int64(c.Size)
|
||||
}
|
||||
ans.Layers = append(ans.Layers, sum)
|
||||
}
|
||||
|
||||
return ans
|
||||
}
|
@@ -13,7 +13,7 @@
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program; if not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package compiler_test
|
||||
package artifact_test
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
@@ -22,7 +22,10 @@ import (
|
||||
|
||||
"github.com/mudler/luet/pkg/compiler"
|
||||
. "github.com/mudler/luet/pkg/compiler/backend"
|
||||
"github.com/mudler/luet/pkg/solver"
|
||||
backend "github.com/mudler/luet/pkg/compiler/backend"
|
||||
. "github.com/mudler/luet/pkg/compiler/types/artifact"
|
||||
compression "github.com/mudler/luet/pkg/compiler/types/compression"
|
||||
compilerspec "github.com/mudler/luet/pkg/compiler/types/spec"
|
||||
|
||||
. "github.com/mudler/luet/pkg/compiler"
|
||||
helpers "github.com/mudler/luet/pkg/helpers"
|
||||
@@ -43,13 +46,10 @@ var _ = Describe("Artifact", func() {
|
||||
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(1))
|
||||
|
||||
compiler := NewLuetCompiler(nil, generalRecipe.GetDatabase(), NewDefaultCompilerOptions(), solver.Options{Type: solver.SingleCoreSimple})
|
||||
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "enman", Category: "app-admin", Version: "1.4.0"})
|
||||
cc := NewLuetCompiler(nil, generalRecipe.GetDatabase())
|
||||
lspec, err := cc.FromPackage(&pkg.DefaultPackage{Name: "enman", Category: "app-admin", Version: "1.4.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
lspec, ok := spec.(*LuetCompilationSpec)
|
||||
Expect(ok).To(BeTrue())
|
||||
|
||||
Expect(lspec.Steps).To(Equal([]string{"echo foo > /test", "echo bar > /test2"}))
|
||||
Expect(lspec.Image).To(Equal("luet/base"))
|
||||
Expect(lspec.Seed).To(Equal("alpine"))
|
||||
@@ -81,13 +81,14 @@ ENV PACKAGE_NAME=enman
|
||||
ENV PACKAGE_VERSION=1.4.0
|
||||
ENV PACKAGE_CATEGORY=app-admin`))
|
||||
b := NewSimpleDockerBackend()
|
||||
opts := CompilerBackendOptions{
|
||||
opts := backend.Options{
|
||||
ImageName: "luet/base",
|
||||
SourcePath: tmpdir,
|
||||
DockerFileName: "Dockerfile",
|
||||
Destination: filepath.Join(tmpdir2, "output1.tar"),
|
||||
}
|
||||
Expect(b.ImageDefinitionToTar(opts)).ToNot(HaveOccurred())
|
||||
Expect(b.BuildImage(opts)).ToNot(HaveOccurred())
|
||||
Expect(b.ExportImage(opts)).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(filepath.Join(tmpdir2, "output1.tar"))).To(BeTrue())
|
||||
Expect(b.BuildImage(opts)).ToNot(HaveOccurred())
|
||||
|
||||
@@ -104,15 +105,16 @@ ENV PACKAGE_VERSION=1.4.0
|
||||
ENV PACKAGE_CATEGORY=app-admin
|
||||
RUN echo foo > /test
|
||||
RUN echo bar > /test2`))
|
||||
opts2 := CompilerBackendOptions{
|
||||
opts2 := backend.Options{
|
||||
ImageName: "test",
|
||||
SourcePath: tmpdir,
|
||||
DockerFileName: "LuetDockerfile",
|
||||
Destination: filepath.Join(tmpdir, "output2.tar"),
|
||||
}
|
||||
Expect(b.ImageDefinitionToTar(opts2)).ToNot(HaveOccurred())
|
||||
Expect(b.BuildImage(opts2)).ToNot(HaveOccurred())
|
||||
Expect(b.ExportImage(opts2)).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(filepath.Join(tmpdir, "output2.tar"))).To(BeTrue())
|
||||
diffs, err := b.Changes(opts, opts2)
|
||||
diffs, err := compiler.GenerateChanges(b, opts, opts2)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
artifacts := []ArtifactNode{{
|
||||
@@ -127,19 +129,19 @@ RUN echo bar > /test2`))
|
||||
|
||||
Expect(diffs).To(Equal(
|
||||
[]ArtifactLayer{{
|
||||
FromImage: filepath.Join(tmpdir2, "output1.tar"),
|
||||
ToImage: filepath.Join(tmpdir, "output2.tar"),
|
||||
FromImage: "luet/base",
|
||||
ToImage: "test",
|
||||
Diffs: ArtifactDiffs{
|
||||
Additions: artifacts,
|
||||
},
|
||||
}}))
|
||||
err = b.ExtractRootfs(CompilerBackendOptions{SourcePath: filepath.Join(tmpdir, "output2.tar"), Destination: rootfs}, false)
|
||||
err = b.ExtractRootfs(backend.Options{ImageName: "test", Destination: rootfs}, false)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
artifact, err := ExtractArtifactFromDelta(rootfs, filepath.Join(tmpdir, "package.tar"), diffs, 2, false, []string{}, []string{}, None)
|
||||
a, err := ExtractArtifactFromDelta(rootfs, filepath.Join(tmpdir, "package.tar"), diffs, 2, false, []string{}, []string{}, compression.None)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(filepath.Join(tmpdir, "package.tar"))).To(BeTrue())
|
||||
err = helpers.Untar(artifact.GetPath(), unpacked, false)
|
||||
err = helpers.Untar(a.Path, unpacked, false)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(filepath.Join(unpacked, "test"))).To(BeTrue())
|
||||
Expect(helpers.Exists(filepath.Join(unpacked, "test2"))).To(BeTrue())
|
||||
@@ -150,13 +152,13 @@ RUN echo bar > /test2`))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(content2).To(Equal("bar\n"))
|
||||
|
||||
err = artifact.Hash()
|
||||
err = a.Hash()
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
err = artifact.Verify()
|
||||
err = a.Verify()
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(helpers.CopyFile(filepath.Join(tmpdir, "output2.tar"), filepath.Join(tmpdir, "package.tar"))).ToNot(HaveOccurred())
|
||||
|
||||
err = artifact.Verify()
|
||||
err = a.Verify()
|
||||
Expect(err).To(HaveOccurred())
|
||||
})
|
||||
|
||||
@@ -173,16 +175,21 @@ RUN echo bar > /test2`))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
defer os.RemoveAll(tmpWork) // clean up
|
||||
|
||||
Expect(os.MkdirAll(filepath.Join(tmpdir, "foo", "bar"), os.ModePerm)).ToNot(HaveOccurred())
|
||||
|
||||
err = ioutil.WriteFile(filepath.Join(tmpdir, "test"), testString, 0644)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
artifact := NewPackageArtifact(filepath.Join(tmpWork, "fake.tar"))
|
||||
artifact.SetCompileSpec(&LuetCompilationSpec{Package: &pkg.DefaultPackage{Name: "foo", Version: "1.0"}})
|
||||
err = ioutil.WriteFile(filepath.Join(tmpdir, "foo", "bar", "test"), testString, 0644)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
err = artifact.Compress(tmpdir, 1)
|
||||
a := NewPackageArtifact(filepath.Join(tmpWork, "fake.tar"))
|
||||
a.CompileSpec = &compilerspec.LuetCompilationSpec{Package: &pkg.DefaultPackage{Name: "foo", Version: "1.0"}}
|
||||
|
||||
err = a.Compress(tmpdir, 1)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
resultingImage := imageprefix + "foo--1.0"
|
||||
opts, err := artifact.GenerateFinalImage(resultingImage, b, false)
|
||||
opts, err := a.GenerateFinalImage(resultingImage, b, false)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(opts.ImageName).To(Equal(resultingImage))
|
||||
|
||||
@@ -192,26 +199,69 @@ RUN echo bar > /test2`))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
defer os.RemoveAll(result) // clean up
|
||||
|
||||
err = b.ExtractRootfs(CompilerBackendOptions{ImageName: resultingImage, Destination: result}, false)
|
||||
err = b.ExtractRootfs(backend.Options{ImageName: resultingImage, Destination: result}, false)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
content, err := ioutil.ReadFile(filepath.Join(result, "test"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(content).To(Equal(testString))
|
||||
|
||||
content, err = ioutil.ReadFile(filepath.Join(result, "foo", "bar", "test"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(content).To(Equal(testString))
|
||||
})
|
||||
|
||||
It("Generates empty packages images", func() {
|
||||
b := NewSimpleDockerBackend()
|
||||
imageprefix := "foo/"
|
||||
|
||||
tmpdir, err := ioutil.TempDir(os.TempDir(), "artifact")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
defer os.RemoveAll(tmpdir) // clean up
|
||||
|
||||
tmpWork, err := ioutil.TempDir(os.TempDir(), "artifact2")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
defer os.RemoveAll(tmpWork) // clean up
|
||||
|
||||
a := NewPackageArtifact(filepath.Join(tmpWork, "fake.tar"))
|
||||
a.CompileSpec = &compilerspec.LuetCompilationSpec{Package: &pkg.DefaultPackage{Name: "foo", Version: "1.0"}}
|
||||
|
||||
err = a.Compress(tmpdir, 1)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
resultingImage := imageprefix + "foo--1.0"
|
||||
opts, err := a.GenerateFinalImage(resultingImage, b, false)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(opts.ImageName).To(Equal(resultingImage))
|
||||
|
||||
Expect(b.ImageExists(resultingImage)).To(BeTrue())
|
||||
|
||||
result, err := ioutil.TempDir(os.TempDir(), "result")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
defer os.RemoveAll(result) // clean up
|
||||
|
||||
err = b.ExtractRootfs(backend.Options{ImageName: resultingImage, Destination: result}, false)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(helpers.DirectoryIsEmpty(result)).To(BeFalse())
|
||||
content, err := ioutil.ReadFile(filepath.Join(result, ".virtual"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(string(content)).To(Equal(""))
|
||||
})
|
||||
|
||||
It("Retrieves uncompressed name", func() {
|
||||
a := NewPackageArtifact("foo.tar.gz")
|
||||
a.SetCompressionType(compiler.GZip)
|
||||
a.CompressionType = (compression.GZip)
|
||||
Expect(a.GetUncompressedName()).To(Equal("foo.tar"))
|
||||
|
||||
a = NewPackageArtifact("foo.tar.zst")
|
||||
a.SetCompressionType(compiler.Zstandard)
|
||||
a.CompressionType = compression.Zstandard
|
||||
Expect(a.GetUncompressedName()).To(Equal("foo.tar"))
|
||||
|
||||
a = NewPackageArtifact("foo.tar")
|
||||
a.SetCompressionType(compiler.None)
|
||||
a.CompressionType = compression.None
|
||||
Expect(a.GetUncompressedName()).To(Equal("foo.tar"))
|
||||
})
|
||||
})
|
@@ -13,7 +13,7 @@
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program; if not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package compiler
|
||||
package artifact
|
||||
|
||||
import (
|
||||
|
||||
@@ -43,7 +43,7 @@ type HashOptions struct {
|
||||
}
|
||||
|
||||
// Generate generates all Checksums supported for the artifact
|
||||
func (c *Checksums) Generate(a Artifact) error {
|
||||
func (c *Checksums) Generate(a *PackageArtifact) error {
|
||||
return c.generateSHA256(a)
|
||||
}
|
||||
|
||||
@@ -56,13 +56,13 @@ func (c Checksums) Compare(d Checksums) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Checksums) generateSHA256(a Artifact) error {
|
||||
func (c *Checksums) generateSHA256(a *PackageArtifact) error {
|
||||
return c.generateSum(a, HashOptions{Hasher: sha256.New(), Type: SHA256})
|
||||
}
|
||||
|
||||
func (c *Checksums) generateSum(a Artifact, opts HashOptions) error {
|
||||
func (c *Checksums) generateSum(a *PackageArtifact, opts HashOptions) error {
|
||||
|
||||
f, err := os.Open(a.GetPath())
|
||||
f, err := os.Open(a.Path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
@@ -13,13 +13,13 @@
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program; if not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package compiler_test
|
||||
package artifact_test
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
|
||||
. "github.com/mudler/luet/pkg/compiler"
|
||||
. "github.com/mudler/luet/pkg/compiler/types/artifact"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
9
pkg/compiler/types/compression/compression.go
Normal file
9
pkg/compiler/types/compression/compression.go
Normal file
@@ -0,0 +1,9 @@
|
||||
package compression
|
||||
|
||||
type Implementation string
|
||||
|
||||
const (
|
||||
None Implementation = "none" // e.g. tar for standard packages
|
||||
GZip Implementation = "gzip"
|
||||
Zstandard Implementation = "zstd"
|
||||
)
|
191
pkg/compiler/types/options/compiler_options.go
Normal file
191
pkg/compiler/types/options/compiler_options.go
Normal file
@@ -0,0 +1,191 @@
|
||||
// Copyright © 2019-2021 Ettore Di Giacinto <mudler@sabayon.org>
|
||||
//
|
||||
// This program is free software; you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation; either version 2 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program; if not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package options
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
|
||||
"github.com/mudler/luet/pkg/compiler/types/compression"
|
||||
"github.com/mudler/luet/pkg/config"
|
||||
"github.com/mudler/luet/pkg/solver"
|
||||
)
|
||||
|
||||
type Compiler struct {
|
||||
PushImageRepository string
|
||||
PullImageRepository []string
|
||||
PullFirst, KeepImg, Push bool
|
||||
Concurrency int
|
||||
CompressionType compression.Implementation
|
||||
|
||||
Wait bool
|
||||
OnlyDeps bool
|
||||
NoDeps bool
|
||||
SolverOptions config.LuetSolverOptions
|
||||
BuildValuesFile []string
|
||||
BuildValues []map[string]interface{}
|
||||
|
||||
PackageTargetOnly bool
|
||||
|
||||
BackendArgs []string
|
||||
|
||||
BackendType string
|
||||
}
|
||||
|
||||
func NewDefaultCompiler() *Compiler {
|
||||
return &Compiler{
|
||||
PushImageRepository: "luet/cache",
|
||||
PullFirst: false,
|
||||
Push: false,
|
||||
CompressionType: compression.None,
|
||||
KeepImg: true,
|
||||
Concurrency: runtime.NumCPU(),
|
||||
OnlyDeps: false,
|
||||
NoDeps: false,
|
||||
SolverOptions: config.LuetSolverOptions{Options: solver.Options{Concurrency: 1, Type: solver.SingleCoreSimple}},
|
||||
}
|
||||
}
|
||||
|
||||
type Option func(cfg *Compiler) error
|
||||
|
||||
func (cfg *Compiler) Apply(opts ...Option) error {
|
||||
for _, opt := range opts {
|
||||
if opt == nil {
|
||||
continue
|
||||
}
|
||||
if err := opt(cfg); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func WithOptions(opt *Compiler) func(cfg *Compiler) error {
|
||||
return func(cfg *Compiler) error {
|
||||
cfg = opt
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func WithBackendType(r string) func(cfg *Compiler) error {
|
||||
return func(cfg *Compiler) error {
|
||||
cfg.BackendType = r
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func WithBuildValues(r []string) func(cfg *Compiler) error {
|
||||
return func(cfg *Compiler) error {
|
||||
cfg.BuildValuesFile = r
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func WithPullRepositories(r []string) func(cfg *Compiler) error {
|
||||
return func(cfg *Compiler) error {
|
||||
cfg.PullImageRepository = r
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func WithPushRepository(r string) func(cfg *Compiler) error {
|
||||
return func(cfg *Compiler) error {
|
||||
if len(cfg.PullImageRepository) == 0 {
|
||||
cfg.PullImageRepository = []string{cfg.PushImageRepository}
|
||||
}
|
||||
cfg.PushImageRepository = r
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func BackendArgs(r []string) func(cfg *Compiler) error {
|
||||
return func(cfg *Compiler) error {
|
||||
cfg.BackendArgs = r
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func PullFirst(b bool) func(cfg *Compiler) error {
|
||||
return func(cfg *Compiler) error {
|
||||
cfg.PullFirst = b
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func KeepImg(b bool) func(cfg *Compiler) error {
|
||||
return func(cfg *Compiler) error {
|
||||
cfg.KeepImg = b
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func PushImages(b bool) func(cfg *Compiler) error {
|
||||
return func(cfg *Compiler) error {
|
||||
cfg.Push = b
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func Wait(b bool) func(cfg *Compiler) error {
|
||||
return func(cfg *Compiler) error {
|
||||
cfg.Wait = b
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func OnlyDeps(b bool) func(cfg *Compiler) error {
|
||||
return func(cfg *Compiler) error {
|
||||
cfg.OnlyDeps = b
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func OnlyTarget(b bool) func(cfg *Compiler) error {
|
||||
return func(cfg *Compiler) error {
|
||||
cfg.PackageTargetOnly = b
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func NoDeps(b bool) func(cfg *Compiler) error {
|
||||
return func(cfg *Compiler) error {
|
||||
cfg.NoDeps = b
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func Concurrency(i int) func(cfg *Compiler) error {
|
||||
return func(cfg *Compiler) error {
|
||||
if i == 0 {
|
||||
i = runtime.NumCPU()
|
||||
}
|
||||
cfg.Concurrency = i
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func WithCompressionType(t compression.Implementation) func(cfg *Compiler) error {
|
||||
return func(cfg *Compiler) error {
|
||||
cfg.CompressionType = t
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func WithSolverOptions(c config.LuetSolverOptions) func(cfg *Compiler) error {
|
||||
return func(cfg *Compiler) error {
|
||||
cfg.SolverOptions = c
|
||||
return nil
|
||||
}
|
||||
}
|
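A minimal usage sketch of the functional-options API introduced above, assuming the defaults from NewDefaultCompiler; the repository name is a placeholder.

package main

import (
	"fmt"

	"github.com/mudler/luet/pkg/compiler/types/compression"
	options "github.com/mudler/luet/pkg/compiler/types/options"
)

func main() {
	cfg := options.NewDefaultCompiler()

	// Apply walks the options in order and stops at the first error.
	// Note that WithPushRepository also seeds PullImageRepository when none was set.
	if err := cfg.Apply(
		options.WithPushRepository("quay.io/example/cache"), // placeholder repository
		options.Concurrency(4),
		options.WithCompressionType(compression.Zstandard),
		options.PushImages(true),
	); err != nil {
		panic(err)
	}

	fmt.Println(cfg.PushImageRepository, cfg.Concurrency, cfg.CompressionType, cfg.Push)
}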
@@ -13,12 +13,14 @@
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program; if not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package compiler
|
||||
package compilerspec
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"path/filepath"
|
||||
|
||||
options "github.com/mudler/luet/pkg/compiler/types/options"
|
||||
|
||||
pkg "github.com/mudler/luet/pkg/package"
|
||||
"github.com/mudler/luet/pkg/solver"
|
||||
"github.com/otiai10/copy"
|
||||
@@ -27,7 +29,7 @@ import (
|
||||
|
||||
type LuetCompilationspecs []LuetCompilationSpec
|
||||
|
||||
func NewLuetCompilationspecs(s ...CompilationSpec) CompilationSpecs {
|
||||
func NewLuetCompilationspecs(s ...*LuetCompilationSpec) *LuetCompilationspecs {
|
||||
all := LuetCompilationspecs{}
|
||||
|
||||
for _, spec := range s {
|
||||
@@ -40,7 +42,7 @@ func (specs LuetCompilationspecs) Len() int {
|
||||
return len(specs)
|
||||
}
|
||||
|
||||
func (specs *LuetCompilationspecs) Remove(s CompilationSpecs) CompilationSpecs {
|
||||
func (specs *LuetCompilationspecs) Remove(s *LuetCompilationspecs) *LuetCompilationspecs {
|
||||
newSpecs := LuetCompilationspecs{}
|
||||
SPECS:
|
||||
for _, spec := range specs.All() {
|
||||
@@ -54,16 +56,12 @@ SPECS:
|
||||
return &newSpecs
|
||||
}
|
||||
|
||||
func (specs *LuetCompilationspecs) Add(s CompilationSpec) {
|
||||
c, ok := s.(*LuetCompilationSpec)
|
||||
if !ok {
|
||||
panic("LuetCompilationspecs supports only []LuetCompilationSpec")
|
||||
}
|
||||
*specs = append(*specs, *c)
|
||||
func (specs *LuetCompilationspecs) Add(s *LuetCompilationSpec) {
|
||||
*specs = append(*specs, *s)
|
||||
}
|
||||
|
||||
func (specs *LuetCompilationspecs) All() []CompilationSpec {
|
||||
var cspecs []CompilationSpec
|
||||
func (specs *LuetCompilationspecs) All() []*LuetCompilationSpec {
|
||||
var cspecs []*LuetCompilationSpec
|
||||
for i, _ := range *specs {
|
||||
f := (*specs)[i]
|
||||
cspecs = append(cspecs, &f)
|
||||
@@ -72,7 +70,7 @@ func (specs *LuetCompilationspecs) All() []CompilationSpec {
|
||||
return cspecs
|
||||
}
|
||||
|
||||
func (specs *LuetCompilationspecs) Unique() CompilationSpecs {
|
||||
func (specs *LuetCompilationspecs) Unique() *LuetCompilationspecs {
|
||||
newSpecs := LuetCompilationspecs{}
|
||||
seen := map[string]bool{}
|
||||
|
||||
@@ -103,9 +101,11 @@ type LuetCompilationSpec struct {
|
||||
Unpack bool `json:"unpack"`
|
||||
Includes []string `json:"includes"`
|
||||
Excludes []string `json:"excludes"`
|
||||
|
||||
BuildOptions options.Compiler `json:"build_options"`
|
||||
}
|
||||
|
||||
func NewLuetCompilationSpec(b []byte, p pkg.Package) (CompilationSpec, error) {
|
||||
func NewLuetCompilationSpec(b []byte, p pkg.Package) (*LuetCompilationSpec, error) {
|
||||
var spec LuetCompilationSpec
|
||||
err := yaml.Unmarshal(b, &spec)
|
||||
if err != nil {
|
||||
@@ -114,12 +114,16 @@ func NewLuetCompilationSpec(b []byte, p pkg.Package) (CompilationSpec, error) {
|
||||
spec.Package = p.(*pkg.DefaultPackage)
|
||||
return &spec, nil
|
||||
}
|
||||
func (a *LuetCompilationSpec) GetSourceAssertion() solver.PackagesAssertions {
|
||||
return a.SourceAssertion
|
||||
func (cs *LuetCompilationSpec) GetSourceAssertion() solver.PackagesAssertions {
|
||||
return cs.SourceAssertion
|
||||
}
|
||||
|
||||
func (a *LuetCompilationSpec) SetSourceAssertion(as solver.PackagesAssertions) {
|
||||
a.SourceAssertion = as
|
||||
func (cs *LuetCompilationSpec) SetBuildOptions(b options.Compiler) {
|
||||
cs.BuildOptions = b
|
||||
}
|
||||
|
||||
func (cs *LuetCompilationSpec) SetSourceAssertion(as solver.PackagesAssertions) {
|
||||
cs.SourceAssertion = as
|
||||
}
|
||||
func (cs *LuetCompilationSpec) GetPackage() pkg.Package {
|
||||
return cs.Package
|
||||
@@ -157,6 +161,12 @@ func (cs *LuetCompilationSpec) GetRetrieve() []string {
|
||||
return cs.Retrieve
|
||||
}
|
||||
|
||||
// IsVirtual returns true if the spec is virtual.
|
||||
// A spec is virtual if the package is empty, and it has no image source to unpack from.
|
||||
func (cs *LuetCompilationSpec) IsVirtual() bool {
|
||||
return cs.EmptyPackage() && !cs.HasImageSource()
|
||||
}
|
||||
|
||||
func (cs *LuetCompilationSpec) GetSeedImage() string {
|
||||
return cs.Seed
|
||||
}
|
||||
@@ -199,8 +209,11 @@ func (cs *LuetCompilationSpec) UnpackedPackage() bool {
|
||||
return unpack
|
||||
}
|
||||
|
||||
// HasImageSource returns true when the compilation spec has an image source.
|
||||
// a compilation spec has an image source when it depends on other packages or has a source image
// explicitly supplied
|
||||
func (cs *LuetCompilationSpec) HasImageSource() bool {
|
||||
return len(cs.GetPackage().GetRequires()) != 0 || cs.GetImage() != ""
|
||||
return (cs.Package != nil && len(cs.GetPackage().GetRequires()) != 0) || cs.GetImage() != ""
|
||||
}
|
||||
|
||||
func (cs *LuetCompilationSpec) CopyRetrieves(dest string) error {
|
@@ -13,17 +13,18 @@
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program; if not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package compiler_test
|
||||
package compilerspec_test
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
compilerspec "github.com/mudler/luet/pkg/compiler/types/spec"
|
||||
|
||||
. "github.com/mudler/luet/pkg/compiler"
|
||||
helpers "github.com/mudler/luet/pkg/helpers"
|
||||
pkg "github.com/mudler/luet/pkg/package"
|
||||
"github.com/mudler/luet/pkg/solver"
|
||||
"github.com/mudler/luet/pkg/tree"
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
@@ -32,25 +33,45 @@ import (
|
||||
var _ = Describe("Spec", func() {
|
||||
Context("Luet specs", func() {
|
||||
It("Allows normal operations", func() {
|
||||
testSpec := &LuetCompilationSpec{Package: &pkg.DefaultPackage{Name: "foo", Category: "a", Version: "0"}}
|
||||
testSpec2 := &LuetCompilationSpec{Package: &pkg.DefaultPackage{Name: "bar", Category: "a", Version: "0"}}
|
||||
testSpec3 := &LuetCompilationSpec{Package: &pkg.DefaultPackage{Name: "baz", Category: "a", Version: "0"}}
|
||||
testSpec4 := &LuetCompilationSpec{Package: &pkg.DefaultPackage{Name: "foo", Category: "a", Version: "0"}}
|
||||
testSpec := &compilerspec.LuetCompilationSpec{Package: &pkg.DefaultPackage{Name: "foo", Category: "a", Version: "0"}}
|
||||
testSpec2 := &compilerspec.LuetCompilationSpec{Package: &pkg.DefaultPackage{Name: "bar", Category: "a", Version: "0"}}
|
||||
testSpec3 := &compilerspec.LuetCompilationSpec{Package: &pkg.DefaultPackage{Name: "baz", Category: "a", Version: "0"}}
|
||||
testSpec4 := &compilerspec.LuetCompilationSpec{Package: &pkg.DefaultPackage{Name: "foo", Category: "a", Version: "0"}}
|
||||
|
||||
specs := NewLuetCompilationspecs(testSpec, testSpec2)
|
||||
specs := compilerspec.NewLuetCompilationspecs(testSpec, testSpec2)
|
||||
Expect(specs.Len()).To(Equal(2))
|
||||
Expect(specs.All()).To(Equal([]CompilationSpec{testSpec, testSpec2}))
|
||||
Expect(specs.All()).To(Equal([]*compilerspec.LuetCompilationSpec{testSpec, testSpec2}))
|
||||
specs.Add(testSpec3)
|
||||
Expect(specs.All()).To(Equal([]CompilationSpec{testSpec, testSpec2, testSpec3}))
|
||||
Expect(specs.All()).To(Equal([]*compilerspec.LuetCompilationSpec{testSpec, testSpec2, testSpec3}))
|
||||
specs.Add(testSpec4)
|
||||
Expect(specs.All()).To(Equal([]CompilationSpec{testSpec, testSpec2, testSpec3, testSpec4}))
|
||||
Expect(specs.All()).To(Equal([]*compilerspec.LuetCompilationSpec{testSpec, testSpec2, testSpec3, testSpec4}))
|
||||
newSpec := specs.Unique()
|
||||
Expect(newSpec.All()).To(Equal([]CompilationSpec{testSpec, testSpec2, testSpec3}))
|
||||
Expect(newSpec.All()).To(Equal([]*compilerspec.LuetCompilationSpec{testSpec, testSpec2, testSpec3}))
|
||||
|
||||
newSpec2 := specs.Remove(NewLuetCompilationspecs(testSpec, testSpec2))
|
||||
Expect(newSpec2.All()).To(Equal([]CompilationSpec{testSpec3}))
|
||||
newSpec2 := specs.Remove(compilerspec.NewLuetCompilationspecs(testSpec, testSpec2))
|
||||
Expect(newSpec2.All()).To(Equal([]*compilerspec.LuetCompilationSpec{testSpec3}))
|
||||
|
||||
})
|
||||
Context("virtuals", func() {
|
||||
When("is empty", func() {
|
||||
It("is virtual", func() {
|
||||
spec := &compilerspec.LuetCompilationSpec{}
|
||||
Expect(spec.IsVirtual()).To(BeTrue())
|
||||
})
|
||||
})
|
||||
When("has defined steps", func() {
|
||||
It("is not a virtual", func() {
|
||||
spec := &compilerspec.LuetCompilationSpec{Steps: []string{"foo"}}
|
||||
Expect(spec.IsVirtual()).To(BeFalse())
|
||||
})
|
||||
})
|
||||
When("has defined image", func() {
|
||||
It("is not a virtual", func() {
|
||||
spec := &compilerspec.LuetCompilationSpec{Image: "foo"}
|
||||
Expect(spec.IsVirtual()).To(BeFalse())
|
||||
})
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
Context("Simple package build definition", func() {
|
||||
@@ -62,13 +83,10 @@ var _ = Describe("Spec", func() {
|
||||
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(1))
|
||||
|
||||
compiler := NewLuetCompiler(nil, generalRecipe.GetDatabase(), NewDefaultCompilerOptions(), solver.Options{Type: solver.SingleCoreSimple})
|
||||
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "enman", Category: "app-admin", Version: "1.4.0"})
|
||||
compiler := NewLuetCompiler(nil, generalRecipe.GetDatabase())
|
||||
lspec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "enman", Category: "app-admin", Version: "1.4.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
lspec, ok := spec.(*LuetCompilationSpec)
|
||||
Expect(ok).To(BeTrue())
|
||||
|
||||
Expect(lspec.Steps).To(Equal([]string{"echo foo > /test", "echo bar > /test2"}))
|
||||
Expect(lspec.Image).To(Equal("luet/base"))
|
||||
Expect(lspec.Seed).To(Equal("alpine"))
|
||||
@@ -117,13 +135,10 @@ RUN echo bar > /test2`))
|
||||
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(1))
|
||||
|
||||
compiler := NewLuetCompiler(nil, generalRecipe.GetDatabase(), NewDefaultCompilerOptions(), solver.Options{Type: solver.SingleCoreSimple})
|
||||
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "a", Category: "test", Version: "1.0"})
|
||||
compiler := NewLuetCompiler(nil, generalRecipe.GetDatabase())
|
||||
lspec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "a", Category: "test", Version: "1.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
lspec, ok := spec.(*LuetCompilationSpec)
|
||||
Expect(ok).To(BeTrue())
|
||||
|
||||
Expect(lspec.Steps).To(Equal([]string{"echo foo > /test", "echo bar > /test2"}))
|
||||
Expect(lspec.Image).To(Equal("luet/base"))
|
||||
Expect(lspec.Seed).To(Equal("alpine"))
|
@@ -65,6 +65,7 @@ type LuetGeneralConfig struct {
|
||||
}
|
||||
|
||||
type LuetSolverOptions struct {
|
||||
solver.Options
|
||||
Type string `mapstructure:"type"`
|
||||
LearnRate float32 `mapstructure:"rate"`
|
||||
Discount float32 `mapstructure:"discount"`
|
||||
@@ -159,8 +160,9 @@ type LuetRepository struct {
|
||||
Enable bool `json:"enable" yaml:"enable" mapstructure:"enable"`
|
||||
Cached bool `json:"cached,omitempty" yaml:"cached,omitempty" mapstructure:"cached,omitempty"`
|
||||
Authentication map[string]string `json:"auth,omitempty" yaml:"auth,omitempty" mapstructure:"auth,omitempty"`
|
||||
TreePath string `json:"tree_path,omitempty" yaml:"tree_path,omitempty" mapstructure:"tree_path"`
|
||||
MetaPath string `json:"meta_path,omitempty" yaml:"meta_path,omitempty" mapstructure:"meta_path"`
|
||||
TreePath string `json:"treepath,omitempty" yaml:"treepath,omitempty" mapstructure:"treepath"`
|
||||
MetaPath string `json:"metapath,omitempty" yaml:"metapath,omitempty" mapstructure:"metapath"`
|
||||
Verify bool `json:"verify,omitempty" yaml:"verify,omitempty" mapstructure:"verify"`
|
||||
|
||||
// Serialized options not used in repository configuration
|
||||
|
||||
|
@@ -95,9 +95,7 @@ func UntarProtect(src, dst string, sameOwner bool, protectedFiles []string, modi
|
||||
replacerArchive := archive.ReplaceFileTarWrapper(in, mods)
|
||||
|
||||
opts := &archive.TarOptions{
|
||||
// NOTE: NoLchown boolean is used for chmod of the symlink
|
||||
// Probably it's needed set this always to true.
|
||||
NoLchown: true,
|
||||
NoLchown: false,
|
||||
ExcludePatterns: []string{"dev/"}, // prevent 'operation not permitted'
|
||||
ContinueOnError: true,
|
||||
}
|
||||
@@ -201,12 +199,8 @@ func Untar(src, dest string, sameOwner bool) error {
|
||||
defer in.Close()
|
||||
|
||||
if sameOwner {
|
||||
// PRE: i have root privileged.
|
||||
|
||||
opts := &archive.TarOptions{
|
||||
// NOTE: NoLchown boolean is used for chmod of the symlink
|
||||
// Probably it's needed set this always to true.
|
||||
NoLchown: true,
|
||||
NoLchown: false,
|
||||
ExcludePatterns: []string{"dev/"}, // prevent 'operation not permitted'
|
||||
ContinueOnError: true,
|
||||
}
|
||||
|
pkg/helpers/docker.go (new file, 124 lines)
@@ -0,0 +1,124 @@
|
||||
// Copyright © 2021 Ettore Di Giacinto <mudler@mocaccino.org>
|
||||
//
|
||||
// This program is free software; you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation; either version 2 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program; if not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package helpers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/hex"
|
||||
"os"
|
||||
|
||||
"github.com/docker/cli/cli/trust"
|
||||
"github.com/docker/distribution/reference"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/registry"
|
||||
"github.com/mudler/luet/pkg/helpers/imgworker"
|
||||
"github.com/opencontainers/go-digest"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/theupdateframework/notary/tuf/data"
|
||||
)
|
||||
|
||||
// See also https://github.com/docker/cli/blob/88c6089300a82d3373892adf6845a4fed1a4ba8d/cli/command/image/trust.go#L171
|
||||
|
||||
func verifyImage(image string, authConfig *types.AuthConfig) (string, error) {
|
||||
ref, err := reference.ParseAnyReference(image)
|
||||
if err != nil {
|
||||
return "", errors.Wrapf(err, "invalid reference %s", image)
|
||||
}
|
||||
|
||||
// only check if image ref doesn't contain hashes
|
||||
if _, ok := ref.(reference.Digested); !ok {
|
||||
namedRef, ok := ref.(reference.Named)
|
||||
if !ok {
|
||||
return "", errors.New("failed to resolve image digest using content trust: reference is not named")
|
||||
}
|
||||
namedRef = reference.TagNameOnly(namedRef)
|
||||
taggedRef, ok := namedRef.(reference.NamedTagged)
|
||||
if !ok {
|
||||
return "", errors.New("failed to resolve image digest using content trust: reference is not tagged")
|
||||
}
|
||||
|
||||
resolvedImage, err := trustedResolveDigest(context.Background(), taggedRef, authConfig, "luet")
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "failed to resolve image digest using content trust")
|
||||
}
|
||||
resolvedFamiliar := reference.FamiliarString(resolvedImage)
|
||||
return resolvedFamiliar, nil
|
||||
}
|
||||
|
||||
return "", nil
|
||||
}
|
||||
|
||||
func trustedResolveDigest(ctx context.Context, ref reference.NamedTagged, authConfig *types.AuthConfig, useragent string) (reference.Canonical, error) {
|
||||
repoInfo, err := registry.ParseRepositoryInfo(ref)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
notaryRepo, err := trust.GetNotaryRepository(os.Stdin, os.Stdout, useragent, repoInfo, authConfig, "pull")
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error establishing connection to trust repository")
|
||||
}
|
||||
|
||||
t, err := notaryRepo.GetTargetByName(ref.Tag(), trust.ReleasesRole, data.CanonicalTargetsRole)
|
||||
if err != nil {
|
||||
return nil, trust.NotaryError(repoInfo.Name.Name(), err)
|
||||
}
|
||||
// Only get the tag if it's in the top level targets role or the releases delegation role
|
||||
// ignore it if it's in any other delegation roles
|
||||
if t.Role != trust.ReleasesRole && t.Role != data.CanonicalTargetsRole {
|
||||
return nil, trust.NotaryError(repoInfo.Name.Name(), errors.Errorf("No trust data for %s", reference.FamiliarString(ref)))
|
||||
}
|
||||
|
||||
h, ok := t.Hashes["sha256"]
|
||||
if !ok {
|
||||
return nil, errors.New("no valid hash, expecting sha256")
|
||||
}
|
||||
|
||||
dgst := digest.NewDigestFromHex("sha256", hex.EncodeToString(h))
|
||||
|
||||
// Allow returning canonical reference with tag
|
||||
return reference.WithDigest(ref, dgst)
|
||||
}
|
||||
|
||||
// DownloadAndExtractDockerImage is a re-adaptation
|
||||
// from genuinetools/img https://github.com/genuinetools/img/blob/54d0ca981c1260546d43961a538550eef55c87cf/pull.go
|
||||
func DownloadAndExtractDockerImage(temp, image, dest string, auth *types.AuthConfig, verify bool) (*imgworker.ListedImage, error) {
|
||||
|
||||
if verify {
|
||||
img, err := verifyImage(image, auth)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed verifying image")
|
||||
}
|
||||
image = img
|
||||
}
|
||||
|
||||
defer os.RemoveAll(temp)
|
||||
c, err := imgworker.New(temp, auth)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed creating client")
|
||||
}
|
||||
defer c.Close()
|
||||
|
||||
listedImage, err := c.Pull(image)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed listing images")
|
||||
|
||||
}
|
||||
|
||||
os.RemoveAll(dest)
|
||||
err = c.Unpack(image, dest)
|
||||
return listedImage, err
|
||||
}
|
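A hedged usage sketch for the helper above, based only on its signature and on how the Docker client later in this diff calls it; the image reference, paths, and credentials are placeholders.

package main

import (
	"fmt"
	"io/ioutil"

	"github.com/docker/docker/api/types"
	"github.com/mudler/luet/pkg/helpers"
)

func main() {
	// Temporary content store used (and removed) by the helper while pulling.
	contentstore, err := ioutil.TempDir("", "contentstore")
	if err != nil {
		panic(err)
	}

	auth := &types.AuthConfig{Username: "user", Password: "secret"} // placeholder credentials

	// verify=false skips the notary/content-trust resolution path above.
	info, err := helpers.DownloadAndExtractDockerImage(contentstore, "quay.io/example/repo:tag", "/tmp/rootfs", auth, false)
	if err != nil {
		fmt.Println("pull failed:", err)
		return
	}
	fmt.Println("pulled:", info.Target.Digest, "size:", info.ContentSize)
}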
@@ -16,16 +16,52 @@
|
||||
package helpers
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"math/rand"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/google/renameio"
|
||||
copy "github.com/otiai10/copy"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
var letterRunes = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
|
||||
|
||||
func RandStringRunes(n int) string {
|
||||
b := make([]rune, n)
|
||||
for i := range b {
|
||||
b[i] = letterRunes[rand.Intn(len(letterRunes))]
|
||||
}
|
||||
return string(b)
|
||||
}
|
||||
|
||||
func Move(src, dst string) error {
|
||||
f, err := os.Open(src)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
t, err := renameio.TempFile("", dst)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer t.Cleanup()
|
||||
|
||||
_, err = io.Copy(t, f)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return t.CloseAtomicallyReplace()
|
||||
}
|
||||
|
||||
func OrderFiles(target string, files []string) ([]string, []string) {
|
||||
|
||||
var newFiles []string
|
||||
@@ -80,6 +116,20 @@ func ListDir(dir string) ([]string, error) {
|
||||
return content, err
|
||||
}
|
||||
|
||||
// DirectoryIsEmpty checks whether the directory is empty or not
|
||||
func DirectoryIsEmpty(dir string) (bool, error) {
|
||||
f, err := os.Open(dir)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
if _, err = f.Readdirnames(1); err == io.EOF {
|
||||
return true, nil
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// Touch creates an empty file
|
||||
func Touch(f string) error {
|
||||
_, err := os.Stat(f)
|
||||
@@ -134,7 +184,39 @@ func EnsureDir(fileName string) error {
|
||||
// of the source file. The file mode will be copied from the source and
|
||||
// the copied data is synced/flushed to stable storage.
|
||||
func CopyFile(src, dst string) (err error) {
|
||||
return copy.Copy(src, dst, copy.Options{OnSymlink: func(string) copy.SymlinkAction { return copy.Shallow }})
|
||||
// Workaround for https://github.com/otiai10/copy/issues/47
|
||||
fi, err := os.Lstat(src)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "error reading file info")
|
||||
}
|
||||
|
||||
fm := fi.Mode()
|
||||
switch {
|
||||
case fm&os.ModeNamedPipe != 0:
|
||||
EnsureDir(dst)
|
||||
if err := syscall.Mkfifo(dst, uint32(fi.Mode())); err != nil {
|
||||
return errors.Wrap(err, "failed creating pipe")
|
||||
}
|
||||
if stat, ok := fi.Sys().(*syscall.Stat_t); ok {
|
||||
if err := os.Chown(dst, int(stat.Uid), int(stat.Gid)); err != nil {
|
||||
return errors.Wrap(err, "failed chowning file")
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
err = copy.Copy(src, dst, copy.Options{
|
||||
Sync: true,
|
||||
OnSymlink: func(string) copy.SymlinkAction { return copy.Shallow }})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if stat, ok := fi.Sys().(*syscall.Stat_t); ok {
|
||||
if err := os.Lchown(dst, int(stat.Uid), int(stat.Gid)); err != nil {
|
||||
fmt.Println("warning: failed chowning", dst, err.Error())
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func IsDirectory(path string) (bool, error) {
|
||||
@@ -151,5 +233,7 @@ func IsDirectory(path string) (bool, error) {
|
||||
func CopyDir(src string, dst string) (err error) {
|
||||
src = filepath.Clean(src)
|
||||
dst = filepath.Clean(dst)
|
||||
return copy.Copy(src, dst, copy.Options{OnSymlink: func(string) copy.SymlinkAction { return copy.Shallow }})
|
||||
return copy.Copy(src, dst, copy.Options{
|
||||
Sync: true,
|
||||
OnSymlink: func(string) copy.SymlinkAction { return copy.Shallow }})
|
||||
}
|
||||
|
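A small sketch of the case the named-pipe workaround above is meant to cover; the paths are placeholders and the example assumes a platform where syscall.Mkfifo is available.

package main

import (
	"fmt"
	"syscall"

	"github.com/mudler/luet/pkg/helpers"
)

func main() {
	src := "/tmp/example-fifo" // placeholder path
	if err := syscall.Mkfifo(src, 0644); err != nil {
		fmt.Println("mkfifo:", err)
		return
	}

	// With the workaround, the destination is recreated as a fifo instead of
	// being handed to copy.Copy, which does not handle named pipes.
	if err := helpers.CopyFile(src, "/tmp/example-fifo-copy"); err != nil {
		fmt.Println("copy:", err)
		return
	}
	fmt.Println("copied named pipe")
}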
@@ -33,6 +33,23 @@ var _ = Describe("Helpers", func() {
|
||||
})
|
||||
})
|
||||
|
||||
Context("DirectoryIsEmpty", func() {
|
||||
It("Detects empty directory", func() {
|
||||
testDir, err := ioutil.TempDir(os.TempDir(), "test")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
defer os.RemoveAll(testDir)
|
||||
Expect(DirectoryIsEmpty(testDir)).To(BeTrue())
|
||||
})
|
||||
It("Detects directory with files", func() {
|
||||
testDir, err := ioutil.TempDir(os.TempDir(), "test")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
defer os.RemoveAll(testDir)
|
||||
err = Touch(filepath.Join(testDir, "foo"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(DirectoryIsEmpty(testDir)).To(BeFalse())
|
||||
})
|
||||
})
|
||||
|
||||
Context("Orders dir and files correctly", func() {
|
||||
It("puts files first and folders at end", func() {
|
||||
testDir, err := ioutil.TempDir(os.TempDir(), "test")
|
||||
|
@@ -3,6 +3,7 @@ package helpers
|
||||
import (
|
||||
"io/ioutil"
|
||||
|
||||
"github.com/imdario/mergo"
|
||||
"github.com/pkg/errors"
|
||||
"gopkg.in/yaml.v2"
|
||||
"helm.sh/helm/v3/pkg/chart"
|
||||
@@ -37,7 +38,38 @@ func RenderHelm(template string, values, d map[string]interface{}) (string, erro
|
||||
|
||||
type templatedata map[string]interface{}
|
||||
|
||||
func RenderFiles(toTemplate, valuesFile string, defaultFile string) (string, error) {
|
||||
// UnMarshalValues unmarshals values files and joins them into a single templatedata;
// the join happens from right to left, so the rightmost values file overwrites the content of the ones before it.
|
||||
func UnMarshalValues(values []string) (templatedata, error) {
|
||||
dst := templatedata{}
|
||||
if len(values) > 0 {
|
||||
for _, bv := range reverse(values) {
|
||||
current := templatedata{}
|
||||
|
||||
defBuild, err := ioutil.ReadFile(bv)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "rendering file "+bv)
|
||||
}
|
||||
err = yaml.Unmarshal(defBuild, ¤t)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "rendering file "+bv)
|
||||
}
|
||||
if err := mergo.Merge(&dst, current); err != nil {
|
||||
return nil, errors.Wrap(err, "merging values file "+bv)
|
||||
}
|
||||
}
|
||||
}
|
||||
return dst, nil
|
||||
}
|
||||
|
||||
func reverse(s []string) []string {
|
||||
for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 {
|
||||
s[i], s[j] = s[j], s[i]
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
func RenderFiles(toTemplate, valuesFile string, defaultFile ...string) (string, error) {
|
||||
raw, err := ioutil.ReadFile(toTemplate)
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "reading file "+toTemplate)
|
||||
@@ -52,20 +84,14 @@ func RenderFiles(toTemplate, valuesFile string, defaultFile string) (string, err
|
||||
}
|
||||
|
||||
var values templatedata
|
||||
d := templatedata{}
|
||||
if len(defaultFile) > 0 {
|
||||
def, err := ioutil.ReadFile(defaultFile)
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "reading file "+valuesFile)
|
||||
}
|
||||
if err = yaml.Unmarshal(def, &d); err != nil {
|
||||
return "", errors.Wrap(err, "unmarshalling file "+toTemplate)
|
||||
}
|
||||
}
|
||||
|
||||
if err = yaml.Unmarshal(val, &values); err != nil {
|
||||
return "", errors.Wrap(err, "unmarshalling file "+toTemplate)
|
||||
}
|
||||
|
||||
return RenderHelm(string(raw), values, d)
|
||||
dst, err := UnMarshalValues(defaultFile)
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "unmarshalling values")
|
||||
}
|
||||
|
||||
return RenderHelm(string(raw), values, dst)
|
||||
}
|
||||
|
@@ -114,11 +114,46 @@ foo: "bar"
|
||||
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
res, err := RenderFiles(toTemplate, values, "")
|
||||
res, err := RenderFiles(toTemplate, values)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(res).To(Equal("bar"))
|
||||
})
|
||||
|
||||
It("Render files merging defaults", func() {
|
||||
testDir, err := ioutil.TempDir(os.TempDir(), "test")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
defer os.RemoveAll(testDir)
|
||||
|
||||
toTemplate := filepath.Join(testDir, "totemplate.yaml")
|
||||
values := filepath.Join(testDir, "values.yaml")
|
||||
d := filepath.Join(testDir, "default.yaml")
|
||||
d2 := filepath.Join(testDir, "default2.yaml")
|
||||
|
||||
writeFile(toTemplate, `{{.Values.foo}}{{.Values.bar}}{{.Values.b}}`)
|
||||
writeFile(values, `
|
||||
foo: "bar"
|
||||
b: "f"
|
||||
`)
|
||||
writeFile(d, `
|
||||
foo: "baz"
|
||||
`)
|
||||
|
||||
writeFile(d2, `
|
||||
foo: "do"
|
||||
bar: "nei"
|
||||
`)
|
||||
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
res, err := RenderFiles(toTemplate, values, d2, d)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(res).To(Equal("bazneif"))
|
||||
|
||||
res, err = RenderFiles(toTemplate, values, d, d2)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(res).To(Equal("doneif"))
|
||||
})
|
||||
|
||||
It("doesn't interpolate if no one provides the values", func() {
|
||||
testDir, err := ioutil.TempDir(os.TempDir(), "test")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
pkg/helpers/imgworker/auth.go (new file, 36 lines)
@@ -0,0 +1,36 @@
|
||||
package imgworker
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
|
||||
"github.com/moby/buildkit/session"
|
||||
"github.com/moby/buildkit/session/auth"
|
||||
"google.golang.org/grpc"
|
||||
)
|
||||
|
||||
func NewDockerAuthProvider(auth *types.AuthConfig) session.Attachable {
|
||||
return &authProvider{
|
||||
config: auth,
|
||||
}
|
||||
}
|
||||
|
||||
type authProvider struct {
|
||||
config *types.AuthConfig
|
||||
}
|
||||
|
||||
func (ap *authProvider) Register(server *grpc.Server) {
|
||||
// no-op
|
||||
}
|
||||
|
||||
func (ap *authProvider) Credentials(ctx context.Context, req *auth.CredentialsRequest) (*auth.CredentialsResponse, error) {
|
||||
res := &auth.CredentialsResponse{}
|
||||
if ap.config.IdentityToken != "" {
|
||||
res.Secret = ap.config.IdentityToken
|
||||
} else {
|
||||
res.Username = ap.config.Username
|
||||
res.Secret = ap.config.Password
|
||||
}
|
||||
return res, nil
|
||||
}
|
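For context, a sketch of how this attachable is wired into a buildkit session; it mirrors the session.go change later in this diff (s.Allow(NewDockerAuthProvider(c.auth))) and uses placeholder credentials.

package imgworker

// Hypothetical illustration only: attach the credential provider to a session.
import (
	"context"

	"github.com/docker/docker/api/types"
	"github.com/moby/buildkit/session"
)

func exampleSession(ctx context.Context) (*session.Session, error) {
	s, err := session.NewSession(ctx, "luet", "")
	if err != nil {
		return nil, err
	}
	auth := &types.AuthConfig{Username: "user", Password: "secret"} // placeholder credentials
	s.Allow(NewDockerAuthProvider(auth))
	return s, nil
}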
@@ -8,6 +8,7 @@ import (
|
||||
"path/filepath"
|
||||
|
||||
"github.com/containerd/containerd/namespaces"
|
||||
dockertypes "github.com/docker/docker/api/types"
|
||||
"github.com/genuinetools/img/types"
|
||||
"github.com/moby/buildkit/control"
|
||||
"github.com/moby/buildkit/session"
|
||||
@@ -29,10 +30,11 @@ type Client struct {
|
||||
|
||||
sess *session.Session
|
||||
ctx context.Context
|
||||
auth *dockertypes.AuthConfig
|
||||
}
|
||||
|
||||
// New returns a new client for communicating with the buildkit controller.
|
||||
func New(root string) (*Client, error) {
|
||||
func New(root string, auth *dockertypes.AuthConfig) (*Client, error) {
|
||||
// Native backend is fine, our images have just one layer. No need to depend on anything
|
||||
backend := types.NativeBackend
|
||||
|
||||
@@ -45,6 +47,7 @@ func New(root string) (*Client, error) {
|
||||
backend: types.NativeBackend,
|
||||
root: root,
|
||||
localDirs: nil,
|
||||
auth: auth,
|
||||
}
|
||||
|
||||
if err := c.prepare(); err != nil {
|
@@ -31,6 +31,7 @@ func (c *Client) Pull(image string) (*ListedImage, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Parse the image name and tag.
|
||||
named, err := reference.ParseNormalizedNamed(image)
|
||||
if err != nil {
|
||||
@@ -114,7 +115,6 @@ func (c *Client) Pull(image string) (*ListedImage, error) {
|
||||
if _, err := e.Export(ctx, exporter.Source{Ref: ref}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Get the image.
|
||||
img, err := opt.ImageStore.Get(ctx, image)
|
||||
if err != nil {
|
@@ -4,10 +4,8 @@ package imgworker
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
|
||||
"github.com/moby/buildkit/session"
|
||||
"github.com/moby/buildkit/session/auth/authprovider"
|
||||
"github.com/moby/buildkit/session/filesync"
|
||||
"github.com/moby/buildkit/session/testutil"
|
||||
"github.com/pkg/errors"
|
||||
@@ -31,7 +29,7 @@ func (c *Client) Session(ctx context.Context) (*session.Session, session.Dialer,
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "failed to create session manager")
|
||||
}
|
||||
sessionName := "img"
|
||||
sessionName := "luet"
|
||||
s, err := session.NewSession(ctx, sessionName, "")
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "failed to create session")
|
||||
@@ -41,7 +39,7 @@ func (c *Client) Session(ctx context.Context) (*session.Session, session.Dialer,
|
||||
syncedDirs = append(syncedDirs, filesync.SyncedDir{Name: name, Dir: d})
|
||||
}
|
||||
s.Allow(filesync.NewFSSyncProvider(syncedDirs))
|
||||
s.Allow(authprovider.NewDockerAuthProvider(os.Stderr))
|
||||
s.Allow(NewDockerAuthProvider(c.auth))
|
||||
return s, sessionDialer(s, m), err
|
||||
}
|
||||
|
@@ -71,7 +71,7 @@ func (c *Client) Unpack(image, dest string) error {
|
||||
// Unpack the tarfile to the rootfs path.
|
||||
// FROM: https://godoc.org/github.com/moby/moby/pkg/archive#TarOptions
|
||||
if err := archive.Untar(content.NewReader(layer), dest, &archive.TarOptions{
|
||||
NoLchown: true,
|
||||
NoLchown: false,
|
||||
ExcludePatterns: []string{"dev/"}, // prevent 'operation not permitted'
|
||||
}); err != nil {
|
||||
return fmt.Errorf("extracting tar for %s to directory %s failed: %v", desc.Digest.String(), dest, err)
|
pkg/helpers/references.go (new file, 30 lines)
@@ -0,0 +1,30 @@
|
||||
// Copyright © 2019-2020 Ettore Di Giacinto <mudler@gentoo.org>
|
||||
// David Cassany <dcassany@suse.com>
|
||||
//
|
||||
// This program is free software; you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation; either version 2 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program; if not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package helpers
|
||||
|
||||
import (
|
||||
"github.com/asaskevich/govalidator"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func StripRegistryFromImage(image string) string {
|
||||
img := strings.SplitN(image, "/", 2)
|
||||
if len(img) == 2 && govalidator.IsURL(img[0]) {
|
||||
return img[1]
|
||||
}
|
||||
return image
|
||||
}
|
pkg/helpers/references_test.go (new file, 44 lines)
@@ -0,0 +1,44 @@
|
||||
// Copyright © 2019-2020 Ettore Di Giacinto <mudler@gentoo.org>
|
||||
// David Cassany <dcassany@suse.com>
|
||||
//
|
||||
// This program is free software; you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation; either version 2 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program; if not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package helpers_test
|
||||
|
||||
import (
|
||||
. "github.com/mudler/luet/pkg/helpers"
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
var _ = Describe("Helpers", func() {
|
||||
Context("StripRegistryFromImage", func() {
|
||||
It("Strips the domain name", func() {
|
||||
out := StripRegistryFromImage("valid.domain.org/base/image:tag")
|
||||
Expect(out).To(Equal("base/image:tag"))
|
||||
})
|
||||
It("Strips the domain name when port is included", func() {
|
||||
out := StripRegistryFromImage("valid.domain.org:5000/base/image:tag")
|
||||
Expect(out).To(Equal("base/image:tag"))
|
||||
})
|
||||
It("Does not strip the domain name", func() {
|
||||
out := StripRegistryFromImage("not-a-domain/base/image:tag")
|
||||
Expect(out).To(Equal("not-a-domain/base/image:tag"))
|
||||
})
|
||||
It("Does not strip the domain name on invalid domains", func() {
|
||||
out := StripRegistryFromImage("-invaliddomain.org/base/image:tag")
|
||||
Expect(out).To(Equal("-invaliddomain.org/base/image:tag"))
|
||||
})
|
||||
})
|
||||
})
|
@@ -16,63 +16,51 @@
|
||||
package client
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/go-units"
|
||||
"github.com/pkg/errors"
|
||||
|
||||
imgworker "github.com/mudler/luet/pkg/installer/client/imgworker"
|
||||
|
||||
"github.com/mudler/luet/pkg/compiler"
|
||||
"github.com/mudler/luet/pkg/compiler/types/artifact"
|
||||
"github.com/mudler/luet/pkg/config"
|
||||
"github.com/mudler/luet/pkg/helpers"
|
||||
. "github.com/mudler/luet/pkg/logger"
|
||||
)
|
||||
|
||||
const (
|
||||
errImageDownloadMsg = "failed downloading image %s: %s"
|
||||
)
|
||||
|
||||
type DockerClient struct {
|
||||
RepoData RepoData
|
||||
auth *types.AuthConfig
|
||||
verify bool
|
||||
}
|
||||
|
||||
func NewDockerClient(r RepoData) *DockerClient {
|
||||
return &DockerClient{RepoData: r}
|
||||
auth := &types.AuthConfig{}
|
||||
|
||||
dat, _ := json.Marshal(r.Authentication)
|
||||
json.Unmarshal(dat, auth)
|
||||
|
||||
return &DockerClient{RepoData: r, auth: auth}
|
||||
}
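A hedged illustration of the JSON round-trip above: the repository Authentication map is expected to carry the docker types.AuthConfig JSON keys; the values here are placeholders, and this helper is not part of the diff.

package client

// Illustrative only.
func exampleDockerClient() *DockerClient {
	r := RepoData{
		Urls:           []string{"quay.io/example/repo"}, // placeholder URL
		Authentication: map[string]string{"username": "user", "password": "secret"},
		Verify:         false,
	}
	return NewDockerClient(r)
}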
|
||||
|
||||
func downloadAndExtractDockerImage(image, dest string) error {
|
||||
temp, err := config.LuetCfg.GetSystem().TempDir("contentstore")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer os.RemoveAll(temp)
|
||||
Debug("Temporary directory", temp)
|
||||
c, err := imgworker.New(temp)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed creating client")
|
||||
}
|
||||
defer c.Close()
|
||||
|
||||
// FROM Slightly adapted from genuinetools/img https://github.com/genuinetools/img/blob/54d0ca981c1260546d43961a538550eef55c87cf/pull.go
|
||||
Debug("Pulling image", image)
|
||||
listedImage, err := c.Pull(image)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed listing images")
|
||||
|
||||
}
|
||||
Debug("Pulled:", listedImage.Target.Digest)
|
||||
Debug("Size:", units.BytesSize(float64(listedImage.ContentSize)))
|
||||
Debug("Unpacking", image, "to", dest)
|
||||
os.RemoveAll(dest)
|
||||
return c.Unpack(image, dest)
|
||||
}
|
||||
|
||||
func (c *DockerClient) DownloadArtifact(artifact compiler.Artifact) (compiler.Artifact, error) {
|
||||
func (c *DockerClient) DownloadArtifact(a *artifact.PackageArtifact) (*artifact.PackageArtifact, error) {
|
||||
//var u *url.URL = nil
|
||||
var err error
|
||||
var temp string
|
||||
var resultingArtifact compiler.Artifact
|
||||
artifactName := path.Base(artifact.GetPath())
|
||||
|
||||
Spinner(22)
|
||||
defer SpinnerStop()
|
||||
|
||||
var resultingArtifact *artifact.PackageArtifact
|
||||
artifactName := path.Base(a.Path)
|
||||
cacheFile := filepath.Join(config.LuetCfg.GetSystem().GetSystemPkgsCacheDirPath(), artifactName)
|
||||
Debug("Cache file", cacheFile)
|
||||
if err := helpers.EnsureDir(cacheFile); err != nil {
|
||||
@@ -90,9 +78,9 @@ func (c *DockerClient) DownloadArtifact(artifact compiler.Artifact) (compiler.Ar
|
||||
// Check if file is already in cache
|
||||
if helpers.Exists(cacheFile) {
|
||||
Debug("Cache hit for artifact", artifactName)
|
||||
resultingArtifact = artifact
|
||||
resultingArtifact.SetPath(cacheFile)
|
||||
resultingArtifact.SetChecksums(compiler.Checksums{})
|
||||
resultingArtifact = a
|
||||
resultingArtifact.Path = cacheFile
|
||||
resultingArtifact.Checksums = artifact.Checksums{}
|
||||
} else {
|
||||
|
||||
temp, err = config.LuetCfg.GetSystem().TempDir("tree")
|
||||
@@ -102,23 +90,32 @@ func (c *DockerClient) DownloadArtifact(artifact compiler.Artifact) (compiler.Ar
|
||||
defer os.RemoveAll(temp)
|
||||
|
||||
for _, uri := range c.RepoData.Urls {
|
||||
Debug("Downloading artifact", artifactName, "from", uri)
|
||||
|
||||
imageName := fmt.Sprintf("%s:%s", uri, artifact.GetCompileSpec().GetPackage().GetFingerPrint())
|
||||
imageName := fmt.Sprintf("%s:%s", uri, a.CompileSpec.GetPackage().ImageID())
|
||||
Info("Downloading image", imageName)
|
||||
|
||||
// imageName := fmt.Sprintf("%s/%s", uri, artifact.GetCompileSpec().GetPackage().GetPackageImageName())
|
||||
err = downloadAndExtractDockerImage(imageName, temp)
|
||||
contentstore, err := config.LuetCfg.GetSystem().TempDir("contentstore")
|
||||
if err != nil {
|
||||
Debug("Failed download of image", imageName)
|
||||
Warning("Cannot create contentstore", err.Error())
|
||||
continue
|
||||
}
|
||||
|
||||
// imageName := fmt.Sprintf("%s/%s", uri, artifact.GetCompileSpec().GetPackage().GetPackageImageName())
|
||||
info, err := helpers.DownloadAndExtractDockerImage(contentstore, imageName, temp, c.auth, c.RepoData.Verify)
|
||||
if err != nil {
|
||||
Warning(fmt.Sprintf(errImageDownloadMsg, imageName, err.Error()))
|
||||
continue
|
||||
}
|
||||
|
||||
Info(fmt.Sprintf("Pulled: %s", info.Target.Digest))
|
||||
Info(fmt.Sprintf("Size: %s", units.BytesSize(float64(info.ContentSize))))
|
||||
Debug("\nCompressing result ", filepath.Join(temp), "to", cacheFile)
|
||||
|
||||
newart := artifact
|
||||
newart := a
|
||||
// We discard checksums, as they are verified during pull and unpack
|
||||
newart.SetChecksums(compiler.Checksums{})
|
||||
newart.SetPath(cacheFile) // First set to cache file
|
||||
newart.SetPath(newart.GetUncompressedName()) // Calculate the real path from cacheFile
|
||||
newart.Checksums = artifact.Checksums{}
|
||||
newart.Path = cacheFile // First set to cache file
|
||||
newart.Path = newart.GetUncompressedName() // Calculate the real path from cacheFile
|
||||
err = newart.Compress(temp, 1)
|
||||
if err != nil {
|
||||
Error(fmt.Sprintf("Failed compressing package %s: %s", imageName, err.Error()))
|
||||
@@ -158,16 +155,24 @@ func (c *DockerClient) DownloadFile(name string) (string, error) {
|
||||
continue
|
||||
}
|
||||
|
||||
Debug("Downloading file", name, "from", uri)
|
||||
|
||||
imageName := fmt.Sprintf("%s:%s", uri, name)
|
||||
//imageName := fmt.Sprintf("%s/%s:%s", uri, "repository", name)
|
||||
err = downloadAndExtractDockerImage(imageName, temp)
|
||||
contentstore, err := config.LuetCfg.GetSystem().TempDir("contentstore")
|
||||
if err != nil {
|
||||
Debug("Failed download of image", imageName)
|
||||
Warning("Cannot create contentstore", err.Error())
|
||||
continue
|
||||
}
|
||||
|
||||
imageName := fmt.Sprintf("%s:%s", uri, name)
|
||||
Info("Downloading", imageName)
|
||||
|
||||
info, err := helpers.DownloadAndExtractDockerImage(contentstore, imageName, temp, c.auth, c.RepoData.Verify)
|
||||
if err != nil {
|
||||
Warning(fmt.Sprintf(errImageDownloadMsg, imageName, err.Error()))
|
||||
continue
|
||||
}
|
||||
|
||||
Info(fmt.Sprintf("Pulled: %s", info.Target.Digest))
|
||||
Info(fmt.Sprintf("Size: %s", units.BytesSize(float64(info.ContentSize))))
|
||||
|
||||
Debug("\nCopying file ", filepath.Join(temp, name), "to", file.Name())
|
||||
err = helpers.CopyFile(filepath.Join(temp, name), file.Name())
|
||||
|
||||
|
@@ -20,7 +20,9 @@ import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
compiler "github.com/mudler/luet/pkg/compiler"
|
||||
"github.com/mudler/luet/pkg/compiler/types/artifact"
|
||||
compilerspec "github.com/mudler/luet/pkg/compiler/types/spec"
|
||||
|
||||
helpers "github.com/mudler/luet/pkg/helpers"
|
||||
pkg "github.com/mudler/luet/pkg/package"
|
||||
|
||||
@@ -54,9 +56,9 @@ var _ = Describe("Docker client", func() {
|
||||
})
|
||||
|
||||
It("Downloads artifacts", func() {
|
||||
f, err := c.DownloadArtifact(&compiler.PackageArtifact{
|
||||
f, err := c.DownloadArtifact(&artifact.PackageArtifact{
|
||||
Path: "test.tar",
|
||||
CompileSpec: &compiler.LuetCompilationSpec{
|
||||
CompileSpec: &compilerspec.LuetCompilationSpec{
|
||||
Package: &pkg.DefaultPackage{
|
||||
Name: "c",
|
||||
Category: "test",
|
||||
@@ -71,7 +73,7 @@ var _ = Describe("Docker client", func() {
|
||||
Expect(f.Unpack(tmpdir, false)).ToNot(HaveOccurred())
|
||||
Expect(helpers.Read(filepath.Join(tmpdir, "c"))).To(Equal("c\n"))
|
||||
Expect(helpers.Read(filepath.Join(tmpdir, "cd"))).To(Equal("c\n"))
|
||||
os.RemoveAll(f.GetPath())
|
||||
os.RemoveAll(f.Path)
|
||||
})
|
||||
})
|
||||
})
|
||||
|
@@ -24,9 +24,9 @@ import (
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"github.com/mudler/luet/pkg/compiler/types/artifact"
|
||||
. "github.com/mudler/luet/pkg/logger"
|
||||
|
||||
"github.com/mudler/luet/pkg/compiler"
|
||||
"github.com/mudler/luet/pkg/config"
|
||||
"github.com/mudler/luet/pkg/helpers"
|
||||
|
||||
@@ -66,13 +66,13 @@ func Round(input float64) float64 {
|
||||
return math.Floor(input + 0.5)
|
||||
}
|
||||
|
||||
func (c *HttpClient) DownloadArtifact(artifact compiler.Artifact) (compiler.Artifact, error) {
|
||||
func (c *HttpClient) DownloadArtifact(a *artifact.PackageArtifact) (*artifact.PackageArtifact, error) {
|
||||
var u *url.URL = nil
|
||||
var err error
|
||||
var req *grab.Request
|
||||
var temp string
|
||||
|
||||
artifactName := path.Base(artifact.GetPath())
|
||||
artifactName := path.Base(a.Path)
|
||||
cacheFile := filepath.Join(config.LuetCfg.GetSystem().GetSystemPkgsCacheDirPath(), artifactName)
|
||||
ok := false
|
||||
|
||||
@@ -168,8 +168,8 @@ func (c *HttpClient) DownloadArtifact(artifact compiler.Artifact) (compiler.Arti
|
||||
}
|
||||
}
|
||||
|
||||
newart := artifact
|
||||
newart.SetPath(cacheFile)
|
||||
newart := a
|
||||
newart.Path = cacheFile
|
||||
return newart, nil
|
||||
}
|
||||
|
||||
|
@@ -22,7 +22,7 @@ import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
compiler "github.com/mudler/luet/pkg/compiler"
|
||||
"github.com/mudler/luet/pkg/compiler/types/artifact"
|
||||
helpers "github.com/mudler/luet/pkg/helpers"
|
||||
|
||||
. "github.com/mudler/luet/pkg/installer/client"
|
||||
@@ -63,10 +63,10 @@ var _ = Describe("Http client", func() {
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
c := NewHttpClient(RepoData{Urls: []string{ts.URL}})
|
||||
path, err := c.DownloadArtifact(&compiler.PackageArtifact{Path: "test.txt"})
|
||||
path, err := c.DownloadArtifact(&artifact.PackageArtifact{Path: "test.txt"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(helpers.Read(path.GetPath())).To(Equal("test"))
|
||||
os.RemoveAll(path.GetPath())
|
||||
Expect(helpers.Read(path.Path)).To(Equal("test"))
|
||||
os.RemoveAll(path.Path)
|
||||
})
|
||||
|
||||
})
|
||||
|
@@ -18,4 +18,5 @@ package client
|
||||
type RepoData struct {
|
||||
Urls []string
|
||||
Authentication map[string]string
|
||||
Verify bool
|
||||
}
|
||||
|
@@ -20,10 +20,10 @@ import (
|
||||
"path"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/mudler/luet/pkg/compiler/types/artifact"
|
||||
"github.com/mudler/luet/pkg/config"
|
||||
. "github.com/mudler/luet/pkg/logger"
|
||||
|
||||
"github.com/mudler/luet/pkg/compiler"
|
||||
"github.com/mudler/luet/pkg/helpers"
|
||||
)
|
||||
|
||||
@@ -35,11 +35,11 @@ func NewLocalClient(r RepoData) *LocalClient {
|
||||
return &LocalClient{RepoData: r}
|
||||
}
|
||||
|
||||
func (c *LocalClient) DownloadArtifact(artifact compiler.Artifact) (compiler.Artifact, error) {
|
||||
func (c *LocalClient) DownloadArtifact(a *artifact.PackageArtifact) (*artifact.PackageArtifact, error) {
|
||||
var err error
|
||||
|
||||
rootfs := ""
|
||||
artifactName := path.Base(artifact.GetPath())
|
||||
artifactName := path.Base(a.Path)
|
||||
cacheFile := filepath.Join(config.LuetCfg.GetSystem().GetSystemPkgsCacheDirPath(), artifactName)
|
||||
|
||||
if !config.LuetCfg.ConfigFromHost {
|
||||
@@ -74,8 +74,8 @@ func (c *LocalClient) DownloadArtifact(artifact compiler.Artifact) (compiler.Art
|
||||
}
|
||||
}
|
||||
|
||||
newart := artifact
|
||||
newart.SetPath(cacheFile)
|
||||
newart := a
|
||||
newart.Path = cacheFile
|
||||
return newart, nil
|
||||
}
|
||||
|
||||
|
@@ -20,7 +20,7 @@ import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
compiler "github.com/mudler/luet/pkg/compiler"
|
||||
"github.com/mudler/luet/pkg/compiler/types/artifact"
|
||||
helpers "github.com/mudler/luet/pkg/helpers"
|
||||
|
||||
. "github.com/mudler/luet/pkg/installer/client"
|
||||
@@ -56,10 +56,10 @@ var _ = Describe("Local client", func() {
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
c := NewLocalClient(RepoData{Urls: []string{tmpdir}})
|
||||
path, err := c.DownloadArtifact(&compiler.PackageArtifact{Path: "test.txt"})
|
||||
path, err := c.DownloadArtifact(&artifact.PackageArtifact{Path: "test.txt"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(helpers.Read(path.GetPath())).To(Equal("test"))
|
||||
os.RemoveAll(path.GetPath())
|
||||
Expect(helpers.Read(path.Path)).To(Equal("test"))
|
||||
os.RemoveAll(path.Path)
|
||||
})
|
||||
|
||||
})
|
||||
|
@@ -16,6 +16,7 @@
|
||||
package installer
|
||||
|
||||
import (
|
||||
"os"
|
||||
"os/exec"
|
||||
|
||||
"github.com/ghodss/yaml"
|
||||
@@ -48,7 +49,7 @@ func (f *LuetFinalizer) RunInstall(s *System) error {
|
||||
for _, c := range f.Install {
|
||||
toRun := append(args, c)
|
||||
Info(":shell: Executing finalizer on ", s.Target, cmd, toRun)
|
||||
if s.Target == "/" {
|
||||
if s.Target == string(os.PathSeparator) {
|
||||
cmd := exec.Command(cmd, toRun...)
|
||||
stdoutStderr, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
|
@@ -24,9 +24,10 @@ import (
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
artifact "github.com/mudler/luet/pkg/compiler/types/artifact"
|
||||
|
||||
. "github.com/logrusorgru/aurora"
|
||||
"github.com/mudler/luet/pkg/bus"
|
||||
compiler "github.com/mudler/luet/pkg/compiler"
|
||||
"github.com/mudler/luet/pkg/config"
|
||||
"github.com/mudler/luet/pkg/helpers"
|
||||
. "github.com/mudler/luet/pkg/logger"
|
||||
@@ -47,6 +48,7 @@ type LuetInstallerOptions struct {
|
||||
CheckConflicts bool
|
||||
SolverUpgrade, RemoveUnavailableOnUpgrade, UpgradeNewRevisions bool
|
||||
Ask bool
|
||||
DownloadOnly bool
|
||||
}
|
||||
|
||||
type LuetInstaller struct {
|
||||
@@ -57,11 +59,11 @@ type LuetInstaller struct {
|
||||
|
||||
type ArtifactMatch struct {
|
||||
Package pkg.Package
|
||||
Artifact compiler.Artifact
|
||||
Repository Repository
|
||||
Artifact *artifact.PackageArtifact
|
||||
Repository *LuetSystemRepository
|
||||
}
|
||||
|
||||
func NewLuetInstaller(opts LuetInstallerOptions) Installer {
|
||||
func NewLuetInstaller(opts LuetInstallerOptions) *LuetInstaller {
|
||||
return &LuetInstaller{Options: opts}
|
||||
}
|
||||
|
||||
@@ -105,11 +107,11 @@ func (l *LuetInstaller) computeUpgrade(syncedRepos Repositories, s *System) (pkg
|
||||
continue
|
||||
}
|
||||
for _, artefact := range matches[0].Repo.GetIndex() {
|
||||
if artefact.GetCompileSpec().GetPackage() == nil {
|
||||
if artefact.CompileSpec.GetPackage() == nil {
|
||||
return uninstall, toInstall, errors.New("Package in compilespec empty")
|
||||
|
||||
}
|
||||
if artefact.GetCompileSpec().GetPackage().Matches(p) && artefact.GetCompileSpec().GetPackage().GetBuildTimestamp() != p.GetBuildTimestamp() {
|
||||
if artefact.CompileSpec.GetPackage().Matches(p) && artefact.CompileSpec.GetPackage().GetBuildTimestamp() != p.GetBuildTimestamp() {
|
||||
toInstall = append(toInstall, matches[0].Package).Unique()
|
||||
uninstall = append(uninstall, p).Unique()
|
||||
}
|
||||
@@ -151,39 +153,7 @@ func (l *LuetInstaller) Upgrade(s *System) error {
|
||||
Info(":memo: note: will consider new build revisions while upgrading")
|
||||
}
|
||||
|
||||
Spinner(32)
|
||||
uninstall, toInstall, err := l.computeUpgrade(syncedRepos, s)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed computing upgrade")
|
||||
}
|
||||
SpinnerStop()
|
||||
|
||||
if len(uninstall) > 0 {
|
||||
Info(":recycle: Packages that are going to be removed from the system:\n ", Yellow(packsToList(uninstall)).BgBlack().String())
|
||||
}
|
||||
|
||||
if len(toInstall) > 0 {
|
||||
Info(":zap:Packages that are going to be installed in the system:\n ", Green(packsToList(toInstall)).BgBlack().String())
|
||||
}
|
||||
|
||||
if len(toInstall) == 0 && len(uninstall) == 0 {
|
||||
Info("Nothing to do")
|
||||
return nil
|
||||
}
|
||||
|
||||
if l.Options.Ask {
|
||||
Info("By going forward, you are also accepting the licenses of the packages that you are going to install in your system.")
|
||||
if Ask() {
|
||||
l.Options.Ask = false // Don't prompt anymore
|
||||
return l.swap(syncedRepos, uninstall, toInstall, s, true)
|
||||
} else {
|
||||
return errors.New("Aborted by user")
|
||||
}
|
||||
}
|
||||
|
||||
Spinner(32)
|
||||
defer SpinnerStop()
|
||||
return l.swap(syncedRepos, uninstall, toInstall, s, true)
|
||||
return l.checkAndUpgrade(syncedRepos, s)
|
||||
}
|
||||
|
||||
func (l *LuetInstaller) SyncRepositories(inMemory bool) (Repositories, error) {
|
||||
@@ -304,6 +274,9 @@ func (l *LuetInstaller) swap(syncedRepos Repositories, toRemove pkg.Packages, to
|
||||
if err := l.download(syncedRepos, match); err != nil {
|
||||
return errors.Wrap(err, "Pre-downloading packages")
|
||||
}
|
||||
if l.Options.DownloadOnly {
|
||||
return nil
|
||||
}
|
||||
|
||||
err = l.Uninstall(s, toRemove...)
|
||||
if err != nil && !l.Options.Force {
|
||||
@@ -316,12 +289,55 @@ func (l *LuetInstaller) swap(syncedRepos Repositories, toRemove pkg.Packages, to
|
||||
return l.install(syncedRepos, match, packages, assertions, allRepos, s)
|
||||
}
|
||||
|
||||
func (l *LuetInstaller) checkAndUpgrade(r Repositories, s *System) error {
|
||||
Spinner(32)
|
||||
uninstall, toInstall, err := l.computeUpgrade(r, s)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed computing upgrade")
|
||||
}
|
||||
SpinnerStop()
|
||||
|
||||
if len(uninstall) > 0 {
|
||||
Info(":recycle: Packages that are going to be removed from the system:\n ", Yellow(packsToList(uninstall)).BgBlack().String())
|
||||
}
|
||||
|
||||
if len(toInstall) > 0 {
|
||||
Info(":zap:Packages that are going to be installed in the system:\n ", Green(packsToList(toInstall)).BgBlack().String())
|
||||
}
|
||||
|
||||
if len(toInstall) == 0 && len(uninstall) == 0 {
|
||||
Info("Nothing to do")
|
||||
return nil
|
||||
}
|
||||
|
||||
if l.Options.Ask {
|
||||
Info("By going forward, you are also accepting the licenses of the packages that you are going to install in your system.")
|
||||
if Ask() {
|
||||
l.Options.Ask = false // Don't prompt anymore
|
||||
return l.swap(r, uninstall, toInstall, s, true)
|
||||
} else {
|
||||
return errors.New("Aborted by user")
|
||||
}
|
||||
}
|
||||
|
||||
Spinner(32)
|
||||
defer SpinnerStop()
|
||||
return l.swap(r, uninstall, toInstall, s, true)
|
||||
}
|
||||
|
||||
func (l *LuetInstaller) Install(cp pkg.Packages, s *System) error {
|
||||
syncedRepos, err := l.SyncRepositories(true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(s.Database.World()) > 0 {
|
||||
Info(":thinking: Checking for available upgrades")
|
||||
if err := l.checkAndUpgrade(syncedRepos, s); err != nil {
|
||||
return errors.Wrap(err, "while checking upgrades before install")
|
||||
}
|
||||
}
|
||||
|
||||
match, packages, assertions, allRepos, err := l.computeInstall(syncedRepos, cp, s)
|
||||
if err != nil {
|
||||
return err
|
||||
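With the refactor above, Upgrade delegates to checkAndUpgrade, Install runs the same check first whenever the system database is not empty, and swap honours DownloadOnly by returning right after the pre-download. A rough usage sketch; the System field wiring is an assumption based on how s.Database and s.Target are used in this diff:

package sketch

import (
	installer "github.com/mudler/luet/pkg/installer"
	pkg "github.com/mudler/luet/pkg/package"
)

func fetchOnlyUpgrade(target string) error {
	inst := installer.NewLuetInstaller(installer.LuetInstallerOptions{
		DownloadOnly: true, // swap()/install() stop right after l.download()
	})
	s := &installer.System{
		Database: pkg.NewInMemoryDatabase(false), // assumed wiring; real callers use the system DB
		Target:   target,
	}
	// Repositories must be attached via inst.Repositories(...) before the sync can find anything.
	return inst.Upgrade(s)
}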
@@ -402,12 +418,12 @@ func (l *LuetInstaller) Reclaim(s *System) error {
|
||||
for _, repo := range syncedRepos {
|
||||
for _, artefact := range repo.GetIndex() {
|
||||
Debug("Checking if",
|
||||
artefact.GetCompileSpec().GetPackage().HumanReadableString(),
|
||||
artefact.CompileSpec.GetPackage().HumanReadableString(),
|
||||
"from", repo.GetName(), "is installed")
|
||||
FILES:
|
||||
for _, f := range artefact.GetFiles() {
|
||||
for _, f := range artefact.Files {
|
||||
if helpers.Exists(filepath.Join(s.Target, f)) {
|
||||
p, err := repo.GetTree().GetDatabase().FindPackage(artefact.GetCompileSpec().GetPackage())
|
||||
p, err := repo.GetTree().GetDatabase().FindPackage(artefact.CompileSpec.GetPackage())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -431,7 +447,7 @@ func (l *LuetInstaller) Reclaim(s *System) error {
|
||||
if err != nil && !l.Options.Force {
|
||||
return errors.Wrap(err, "Failed creating package")
|
||||
}
|
||||
s.Database.SetPackageFiles(&pkg.PackageFile{PackageFingerprint: pack.GetFingerPrint(), Files: match.Artifact.GetFiles()})
|
||||
s.Database.SetPackageFiles(&pkg.PackageFile{PackageFingerprint: pack.GetFingerPrint(), Files: match.Artifact.Files})
|
||||
Info(":zap:Reclaimed package:", pack.HumanReadableString())
|
||||
}
|
||||
Info("Done!")
|
||||
@@ -508,11 +524,11 @@ func (l *LuetInstaller) computeInstall(syncedRepos Repositories, cp pkg.Packages
|
||||
}
|
||||
A:
|
||||
for _, artefact := range matches[0].Repo.GetIndex() {
|
||||
if artefact.GetCompileSpec().GetPackage() == nil {
|
||||
if artefact.CompileSpec.GetPackage() == nil {
|
||||
return toInstall, p, solution, allRepos, errors.New("Package in compilespec empty")
|
||||
}
|
||||
if matches[0].Package.Matches(artefact.GetCompileSpec().GetPackage()) {
|
||||
currentPack.SetBuildTimestamp(artefact.GetCompileSpec().GetPackage().GetBuildTimestamp())
|
||||
if matches[0].Package.Matches(artefact.CompileSpec.GetPackage()) {
|
||||
currentPack.SetBuildTimestamp(artefact.CompileSpec.GetPackage().GetBuildTimestamp())
|
||||
// Filter out already installed
|
||||
if _, err := s.Database.FindPackage(currentPack); err != nil {
|
||||
toInstall[currentPack.GetFingerPrint()] = ArtifactMatch{Package: currentPack, Artifact: artefact, Repository: matches[0].Repo}
|
||||
@@ -530,6 +546,10 @@ func (l *LuetInstaller) install(syncedRepos Repositories, toInstall map[string]A
|
||||
return errors.Wrap(err, "Downloading packages")
|
||||
}
|
||||
|
||||
if l.Options.DownloadOnly {
|
||||
return nil
|
||||
}
|
||||
|
||||
all := make(chan ArtifactMatch)
|
||||
|
||||
wg := new(sync.WaitGroup)
|
||||
@@ -594,7 +614,7 @@ func (l *LuetInstaller) install(syncedRepos Repositories, toInstall map[string]A
|
||||
return s.ExecuteFinalizers(toFinalize)
|
||||
}
|
||||
|
||||
func (l *LuetInstaller) downloadPackage(a ArtifactMatch) (compiler.Artifact, error) {
|
||||
func (l *LuetInstaller) downloadPackage(a ArtifactMatch) (*artifact.PackageArtifact, error) {
|
||||
|
||||
artifact, err := a.Repository.Client().DownloadArtifact(a.Artifact)
|
||||
if err != nil {
|
||||
@@ -608,26 +628,26 @@ func (l *LuetInstaller) downloadPackage(a ArtifactMatch) (compiler.Artifact, err
|
||||
return artifact, nil
|
||||
}
|
||||
|
||||
func (l *LuetInstaller) installPackage(a ArtifactMatch, s *System) error {
|
||||
func (l *LuetInstaller) installPackage(m ArtifactMatch, s *System) error {
|
||||
|
||||
artifact, err := l.downloadPackage(a)
|
||||
a, err := l.downloadPackage(m)
|
||||
if err != nil && !l.Options.Force {
|
||||
return errors.Wrap(err, "Failed downloading package")
|
||||
}
|
||||
|
||||
files, err := artifact.FileList()
|
||||
files, err := a.FileList()
|
||||
if err != nil && !l.Options.Force {
|
||||
return errors.Wrap(err, "Could not open package archive")
|
||||
}
|
||||
|
||||
err = artifact.Unpack(s.Target, true)
|
||||
err = a.Unpack(s.Target, true)
|
||||
if err != nil && !l.Options.Force {
|
||||
return errors.Wrap(err, "Error met while unpacking rootfs")
|
||||
}
|
||||
|
||||
// First create client and download
|
||||
// Then unpack to system
|
||||
return s.Database.SetPackageFiles(&pkg.PackageFile{PackageFingerprint: a.Package.GetFingerPrint(), Files: files})
|
||||
return s.Database.SetPackageFiles(&pkg.PackageFile{PackageFingerprint: m.Package.GetFingerPrint(), Files: files})
|
||||
}
|
||||
|
||||
func (l *LuetInstaller) downloadWorker(i int, wg *sync.WaitGroup, c <-chan ArtifactMatch) error {
|
||||
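downloadPackage now returns a *artifact.PackageArtifact, and installPackage drives the whole install through that type: FileList to record ownership, Unpack to lay the files into the target root. A tiny sketch of that artifact API in isolation; the archive and rootfs paths are made up:

package sketch

import (
	"fmt"

	artifact "github.com/mudler/luet/pkg/compiler/types/artifact"
)

func inspectAndUnpack() error {
	a := artifact.NewPackageArtifact("/tmp/b-test-1.0.package.tar") // hypothetical path
	files, err := a.FileList()
	if err != nil {
		return err
	}
	fmt.Println("archive contains", len(files), "files")
	// Second argument as used by installPackage above; its exact meaning is not shown in this diff.
	return a.Unpack("/tmp/rootfs", true)
}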
@@ -847,4 +867,4 @@ func (l *LuetInstaller) Uninstall(s *System, packs ...pkg.Package) error {
|
||||
return uninstall()
|
||||
}
|
||||
|
||||
func (l *LuetInstaller) Repositories(r []Repository) { l.PackageRepositories = r }
|
||||
func (l *LuetInstaller) Repositories(r []*LuetSystemRepository) { l.PackageRepositories = r }
|
||||
|
@@ -23,9 +23,10 @@ import (
|
||||
// . "github.com/mudler/luet/pkg/installer"
|
||||
compiler "github.com/mudler/luet/pkg/compiler"
|
||||
backend "github.com/mudler/luet/pkg/compiler/backend"
|
||||
compression "github.com/mudler/luet/pkg/compiler/types/compression"
|
||||
"github.com/mudler/luet/pkg/compiler/types/options"
|
||||
compilerspec "github.com/mudler/luet/pkg/compiler/types/spec"
|
||||
"github.com/mudler/luet/pkg/helpers"
|
||||
"github.com/mudler/luet/pkg/installer"
|
||||
solver "github.com/mudler/luet/pkg/solver"
|
||||
|
||||
. "github.com/mudler/luet/pkg/installer"
|
||||
pkg "github.com/mudler/luet/pkg/package"
|
||||
@@ -34,7 +35,7 @@ import (
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
func stubRepo(tmpdir, tree string) (installer.Repository, error) {
|
||||
func stubRepo(tmpdir, tree string) (*LuetSystemRepository, error) {
|
||||
return GenerateRepository(
|
||||
"test",
|
||||
"description",
|
||||
@@ -43,10 +44,11 @@ func stubRepo(tmpdir, tree string) (installer.Repository, error) {
|
||||
1,
|
||||
tmpdir,
|
||||
[]string{tree},
|
||||
pkg.NewInMemoryDatabase(false), nil, "", false, false)
|
||||
pkg.NewInMemoryDatabase(false), nil, "", false, false, false, nil)
|
||||
}
|
||||
|
||||
var _ = Describe("Installer", func() {
|
||||
|
||||
Context("Writes a repository definition", func() {
|
||||
It("Writes a repo and can install packages from it", func() {
|
||||
//repo:=NewLuetSystemRepository()
|
||||
@@ -62,7 +64,9 @@ var _ = Describe("Installer", func() {
|
||||
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(3))
|
||||
|
||||
c := compiler.NewLuetCompiler(backend.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), compiler.NewDefaultCompilerOptions(), solver.Options{Type: solver.SingleCoreSimple})
|
||||
c := compiler.NewLuetCompiler(backend.NewSimpleDockerBackend(),
|
||||
generalRecipe.GetDatabase(),
|
||||
options.Concurrency(2))
|
||||
|
||||
spec, err := c.FromPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
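These test changes replace the old NewDefaultCompilerOptions/solver.Options arguments and the SetConcurrency/SetCompressionType setters with functional options. A condensed sketch of the new construction style; the in-memory database stands in for generalRecipe.GetDatabase():

package sketch

import (
	compiler "github.com/mudler/luet/pkg/compiler"
	backend "github.com/mudler/luet/pkg/compiler/backend"
	compression "github.com/mudler/luet/pkg/compiler/types/compression"
	"github.com/mudler/luet/pkg/compiler/types/options"
	pkg "github.com/mudler/luet/pkg/package"
)

func newCompiler() {
	c := compiler.NewLuetCompiler(
		backend.NewSimpleDockerBackend(),
		pkg.NewInMemoryDatabase(false),                // stand-in for a loaded tree database
		options.Concurrency(2),                        // replaces c.SetConcurrency(2)
		options.WithCompressionType(compression.GZip), // replaces c.SetCompressionType(compiler.GZip)
	)
	_ = c
}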
@@ -77,12 +81,11 @@ var _ = Describe("Installer", func() {
|
||||
Expect(spec.GetPreBuildSteps()).To(Equal([]string{"echo foo > /test", "echo bar > /test2"}))
|
||||
|
||||
spec.SetOutputPath(tmpdir)
|
||||
c.SetConcurrency(2)
|
||||
|
||||
artifact, err := c.Compile(false, spec)
|
||||
a, err := c.Compile(false, spec)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(artifact.GetPath())).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.GetPath(), tmpdir, false)).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(a.Path)).To(BeTrue())
|
||||
Expect(helpers.Untar(a.Path, tmpdir, false)).ToNot(HaveOccurred())
|
||||
|
||||
Expect(helpers.Exists(spec.Rel("test5"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("test6"))).To(BeTrue())
|
||||
@@ -178,7 +181,7 @@ urls:
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(3))
|
||||
|
||||
c := compiler.NewLuetCompiler(backend.NewSimpleDockerBackend(),
|
||||
generalRecipe.GetDatabase(), compiler.NewDefaultCompilerOptions(), solver.Options{Type: solver.SingleCoreSimple})
|
||||
generalRecipe.GetDatabase(), options.Concurrency(2))
|
||||
|
||||
spec, err := c.FromPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
@@ -193,12 +196,11 @@ urls:
|
||||
Expect(spec.GetPreBuildSteps()).To(Equal([]string{"echo foo > /test", "echo bar > /test2"}))
|
||||
|
||||
spec.SetOutputPath(tmpdir)
|
||||
c.SetConcurrency(2)
|
||||
|
||||
artifact, err := c.Compile(false, spec)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(artifact.GetPath())).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.GetPath(), tmpdir, false)).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(artifact.Path)).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.Path, tmpdir, false)).ToNot(HaveOccurred())
|
||||
|
||||
Expect(helpers.Exists(spec.Rel("test5"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("test6"))).To(BeTrue())
|
||||
@@ -217,7 +219,7 @@ urls:
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
treeFile := NewDefaultTreeRepositoryFile()
|
||||
treeFile.SetCompressionType(compiler.None)
|
||||
treeFile.SetCompressionType(compression.None)
|
||||
repo.SetRepositoryFile(REPOFILE_TREE_KEY, treeFile)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(repo.GetName()).To(Equal("test"))
|
||||
@@ -298,7 +300,8 @@ urls:
|
||||
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(3))
|
||||
|
||||
c := compiler.NewLuetCompiler(backend.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), compiler.NewDefaultCompilerOptions(), solver.Options{Type: solver.SingleCoreSimple})
|
||||
c := compiler.NewLuetCompiler(backend.NewSimpleDockerBackend(), generalRecipe.GetDatabase(),
|
||||
options.Concurrency(2))
|
||||
|
||||
spec, err := c.FromPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
@@ -313,12 +316,11 @@ urls:
|
||||
Expect(spec.GetPreBuildSteps()).To(Equal([]string{"echo foo > /test", "echo bar > /test2"}))
|
||||
|
||||
spec.SetOutputPath(tmpdir)
|
||||
c.SetConcurrency(2)
|
||||
|
||||
artifact, err := c.Compile(false, spec)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(artifact.GetPath())).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.GetPath(), tmpdir, false)).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(artifact.Path)).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.Path, tmpdir, false)).ToNot(HaveOccurred())
|
||||
|
||||
Expect(helpers.Exists(spec.Rel("test5"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("test6"))).To(BeTrue())
|
||||
@@ -340,7 +342,7 @@ urls:
|
||||
[]string{tmpdir}, 1,
|
||||
tmpdir,
|
||||
[]string{"../../tests/fixtures/buildable"},
|
||||
pkg.NewInMemoryDatabase(false), nil, "", false, false)
|
||||
pkg.NewInMemoryDatabase(false), nil, "", false, false, false, nil)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(repo.GetName()).To(Equal("test"))
|
||||
Expect(helpers.Exists(spec.Rel("repository.yaml"))).ToNot(BeTrue())
|
||||
@@ -423,7 +425,8 @@ urls:
|
||||
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(3))
|
||||
|
||||
c := compiler.NewLuetCompiler(backend.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), compiler.NewDefaultCompilerOptions(), solver.Options{Type: solver.SingleCoreSimple})
|
||||
c := compiler.NewLuetCompiler(backend.NewSimpleDockerBackend(), generalRecipe.GetDatabase(),
|
||||
options.Concurrency(2))
|
||||
|
||||
spec, err := c.FromPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
@@ -438,12 +441,11 @@ urls:
|
||||
Expect(spec.GetPreBuildSteps()).To(Equal([]string{"echo foo > /test", "echo bar > /test2"}))
|
||||
|
||||
spec.SetOutputPath(tmpdir)
|
||||
c.SetConcurrency(2)
|
||||
|
||||
artifact, err := c.Compile(false, spec)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(artifact.GetPath())).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.GetPath(), tmpdir, false)).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(artifact.Path)).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.Path, tmpdir, false)).ToNot(HaveOccurred())
|
||||
|
||||
Expect(helpers.Exists(spec.Rel("test5"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("test6"))).To(BeTrue())
|
||||
@@ -466,7 +468,7 @@ urls:
|
||||
1,
|
||||
tmpdir,
|
||||
[]string{"../../tests/fixtures/buildable"},
|
||||
pkg.NewInMemoryDatabase(false), nil, "", false, false)
|
||||
pkg.NewInMemoryDatabase(false), nil, "", false, false, false, nil)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(repo.GetName()).To(Equal("test"))
|
||||
Expect(helpers.Exists(spec.Rel("repository.yaml"))).ToNot(BeTrue())
|
||||
@@ -518,7 +520,7 @@ urls:
|
||||
|
||||
Expect(len(generalRecipe2.GetDatabase().GetPackages())).To(Equal(1))
|
||||
|
||||
c = compiler.NewLuetCompiler(backend.NewSimpleDockerBackend(), generalRecipe2.GetDatabase(), compiler.NewDefaultCompilerOptions(), solver.Options{Type: solver.SingleCoreSimple})
|
||||
c = compiler.NewLuetCompiler(backend.NewSimpleDockerBackend(), generalRecipe2.GetDatabase(), options.Concurrency(2))
|
||||
|
||||
spec, err = c.FromPackage(&pkg.DefaultPackage{Name: "alpine", Category: "seed", Version: "1.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
@@ -526,11 +528,10 @@ urls:
|
||||
Expect(spec.GetPackage().GetPath()).ToNot(Equal(""))
|
||||
|
||||
spec.SetOutputPath(tmpdir2)
|
||||
c.SetConcurrency(2)
|
||||
|
||||
artifact, err = c.Compile(false, spec)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(artifact.GetPath())).To(BeTrue())
|
||||
Expect(helpers.Exists(artifact.Path)).To(BeTrue())
|
||||
|
||||
repo, err = stubRepo(tmpdir2, "../../tests/fixtures/alpine")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
@@ -577,7 +578,7 @@ urls:
|
||||
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(4))
|
||||
|
||||
c := compiler.NewLuetCompiler(backend.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), compiler.NewDefaultCompilerOptions(), solver.Options{Type: solver.SingleCoreSimple})
|
||||
c := compiler.NewLuetCompiler(backend.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), options.Concurrency(2))
|
||||
|
||||
spec, err := c.FromPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
@@ -595,9 +596,8 @@ urls:
|
||||
spec.SetOutputPath(tmpdir)
|
||||
spec2.SetOutputPath(tmpdir)
|
||||
spec3.SetOutputPath(tmpdir)
|
||||
c.SetConcurrency(2)
|
||||
|
||||
_, errs := c.CompileParallel(false, compiler.NewLuetCompilationspecs(spec, spec2, spec3))
|
||||
_, errs := c.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec, spec2, spec3))
|
||||
|
||||
Expect(errs).To(BeEmpty())
|
||||
|
||||
@@ -695,8 +695,8 @@ urls:
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(3))
|
||||
Expect(len(generalRecipeNewRepo.GetDatabase().GetPackages())).To(Equal(3))
|
||||
|
||||
c := compiler.NewLuetCompiler(backend.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), compiler.NewDefaultCompilerOptions(), solver.Options{Type: solver.SingleCoreSimple})
|
||||
c2 := compiler.NewLuetCompiler(backend.NewSimpleDockerBackend(), generalRecipeNewRepo.GetDatabase(), compiler.NewDefaultCompilerOptions(), solver.Options{Type: solver.SingleCoreSimple})
|
||||
c := compiler.NewLuetCompiler(backend.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), options.Concurrency(2))
|
||||
c2 := compiler.NewLuetCompiler(backend.NewSimpleDockerBackend(), generalRecipeNewRepo.GetDatabase())
|
||||
|
||||
spec, err := c.FromPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
@@ -718,13 +718,12 @@ urls:
|
||||
spec.SetOutputPath(tmpdir)
|
||||
spec2.SetOutputPath(tmpdirnewrepo)
|
||||
spec3.SetOutputPath(tmpdir)
|
||||
c.SetConcurrency(2)
|
||||
|
||||
_, errs := c.CompileParallel(false, compiler.NewLuetCompilationspecs(spec, spec3))
|
||||
_, errs := c.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec, spec3))
|
||||
|
||||
Expect(errs).To(BeEmpty())
|
||||
|
||||
_, errs = c2.CompileParallel(false, compiler.NewLuetCompilationspecs(spec2))
|
||||
_, errs = c2.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec2))
|
||||
Expect(errs).To(BeEmpty())
|
||||
|
||||
repo, err := stubRepo(tmpdir, "../../tests/fixtures/upgrade_old_repo")
|
||||
@@ -827,7 +826,12 @@ urls:
|
||||
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(4))
|
||||
|
||||
c := compiler.NewLuetCompiler(backend.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), compiler.NewDefaultCompilerOptions(), solver.Options{Type: solver.SingleCoreSimple})
|
||||
c := compiler.NewLuetCompiler(
|
||||
backend.NewSimpleDockerBackend(),
|
||||
generalRecipe.GetDatabase(),
|
||||
options.Concurrency(2),
|
||||
options.WithCompressionType(compression.GZip),
|
||||
)
|
||||
|
||||
spec, err := c.FromPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
@@ -844,9 +848,8 @@ urls:
|
||||
spec.SetOutputPath(tmpdir)
|
||||
spec2.SetOutputPath(tmpdir)
|
||||
spec3.SetOutputPath(tmpdir)
|
||||
c.SetConcurrency(2)
|
||||
c.SetCompressionType(compiler.GZip)
|
||||
_, errs := c.CompileParallel(false, compiler.NewLuetCompilationspecs(spec, spec2, spec3))
|
||||
|
||||
_, errs := c.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec, spec2, spec3))
|
||||
|
||||
Expect(errs).To(BeEmpty())
|
||||
|
||||
@@ -985,7 +988,9 @@ urls:
|
||||
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(4))
|
||||
|
||||
c := compiler.NewLuetCompiler(backend.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), compiler.NewDefaultCompilerOptions(), solver.Options{Type: solver.SingleCoreSimple})
|
||||
c := compiler.NewLuetCompiler(backend.NewSimpleDockerBackend(), generalRecipe.GetDatabase(),
|
||||
options.Concurrency(2),
|
||||
options.WithCompressionType(compression.GZip))
|
||||
|
||||
spec, err := c.FromPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
@@ -1002,9 +1007,7 @@ urls:
|
||||
spec.SetOutputPath(tmpdir)
|
||||
spec2.SetOutputPath(tmpdir)
|
||||
spec3.SetOutputPath(tmpdir)
|
||||
c.SetConcurrency(2)
|
||||
c.SetCompressionType(compiler.GZip)
|
||||
_, errs := c.CompileParallel(false, compiler.NewLuetCompilationspecs(spec, spec2, spec3))
|
||||
_, errs := c.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec, spec2, spec3))
|
||||
|
||||
Expect(errs).To(BeEmpty())
|
||||
|
||||
@@ -1090,7 +1093,8 @@ urls:
|
||||
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(3))
|
||||
|
||||
c := compiler.NewLuetCompiler(backend.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), compiler.NewDefaultCompilerOptions(), solver.Options{Type: solver.SingleCoreSimple})
|
||||
c := compiler.NewLuetCompiler(backend.NewSimpleDockerBackend(), generalRecipe.GetDatabase(),
|
||||
options.WithCompressionType(compression.GZip))
|
||||
|
||||
spec, err := c.FromPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
@@ -1104,9 +1108,7 @@ urls:
|
||||
defer os.RemoveAll(tmpdir) // clean up
|
||||
spec.SetOutputPath(tmpdir)
|
||||
spec3.SetOutputPath(tmpdir)
|
||||
c.SetConcurrency(1)
|
||||
c.SetCompressionType(compiler.GZip)
|
||||
_, errs := c.CompileParallel(false, compiler.NewLuetCompilationspecs(spec, spec3))
|
||||
_, errs := c.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec, spec3))
|
||||
|
||||
Expect(errs).To(BeEmpty())
|
||||
|
||||
@@ -1182,7 +1184,7 @@ urls:
|
||||
|
||||
Expect(len(generalRecipe2.GetDatabase().GetPackages())).To(Equal(3))
|
||||
|
||||
c = compiler.NewLuetCompiler(backend.NewSimpleDockerBackend(), generalRecipe2.GetDatabase(), compiler.NewDefaultCompilerOptions(), solver.Options{Type: solver.SingleCoreSimple})
|
||||
c = compiler.NewLuetCompiler(backend.NewSimpleDockerBackend(), generalRecipe2.GetDatabase())
|
||||
|
||||
spec, err = c.FromPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.1"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
@@ -1192,7 +1194,7 @@ urls:
|
||||
defer os.RemoveAll(tmpdir2) // clean up
|
||||
spec.SetOutputPath(tmpdir2)
|
||||
|
||||
_, errs = c.CompileParallel(false, compiler.NewLuetCompilationspecs(spec))
|
||||
_, errs = c.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec))
|
||||
|
||||
Expect(errs).To(BeEmpty())
|
||||
|
||||
|
@@ -16,62 +16,13 @@
|
||||
package installer
|
||||
|
||||
import (
|
||||
compiler "github.com/mudler/luet/pkg/compiler"
|
||||
pkg "github.com/mudler/luet/pkg/package"
|
||||
"github.com/mudler/luet/pkg/tree"
|
||||
artifact "github.com/mudler/luet/pkg/compiler/types/artifact"
|
||||
//"github.com/mudler/luet/pkg/solver"
|
||||
)
|
||||
|
||||
type Installer interface {
Install(pkg.Packages, *System) error
Uninstall(*System, ...pkg.Package) error
Upgrade(s *System) error
Reclaim(s *System) error

Repositories([]Repository)
SyncRepositories(bool) (Repositories, error)
Swap(pkg.Packages, pkg.Packages, *System) error
}

type Client interface {
DownloadArtifact(compiler.Artifact) (compiler.Artifact, error)
DownloadArtifact(*artifact.PackageArtifact) (*artifact.PackageArtifact, error)
DownloadFile(string) (string, error)
}
|
||||
|
||||
type Repositories []Repository
|
||||
|
||||
type Repository interface {
|
||||
GetName() string
|
||||
GetDescription() string
|
||||
GetUrls() []string
|
||||
SetUrls([]string)
|
||||
AddUrl(string)
|
||||
GetPriority() int
|
||||
GetIndex() compiler.ArtifactIndex
|
||||
SetIndex(i compiler.ArtifactIndex)
|
||||
GetTree() tree.Builder
|
||||
SetTree(tree.Builder)
|
||||
Write(path string, resetRevision, force bool) error
|
||||
Sync(bool) (Repository, error)
|
||||
GetTreePath() string
|
||||
SetTreePath(string)
|
||||
GetMetaPath() string
|
||||
SetMetaPath(string)
|
||||
GetType() string
|
||||
SetType(string)
|
||||
SetAuthentication(map[string]string)
|
||||
GetAuthentication() map[string]string
|
||||
GetRevision() int
|
||||
IncrementRevision()
|
||||
GetLastUpdate() string
|
||||
SetLastUpdate(string)
|
||||
Client() Client
|
||||
|
||||
SetPriority(int)
|
||||
GetRepositoryFile(string) (LuetRepositoryFile, error)
|
||||
SetRepositoryFile(string, LuetRepositoryFile)
|
||||
SetName(p string)
|
||||
Serialize() (*LuetSystemRepositoryMetadata, LuetSystemRepositorySerialized)
|
||||
GetBackend() compiler.CompilerBackend
|
||||
SetBackend(b compiler.CompilerBackend)
|
||||
}
|
||||
type Repositories []*LuetSystemRepository
|
||||
|
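The interface trim above keeps Installer and Client and turns Repositories into a slice of the concrete *LuetSystemRepository; Client now moves *artifact.PackageArtifact values around instead of the old compiler.Artifact interface. A toy Client implementation, purely to illustrate the new shape:

package sketch

import (
	artifact "github.com/mudler/luet/pkg/compiler/types/artifact"
	installer "github.com/mudler/luet/pkg/installer"
)

type nopClient struct{}

func (nopClient) DownloadArtifact(a *artifact.PackageArtifact) (*artifact.PackageArtifact, error) {
	return a, nil // pretend the artifact is already local
}

func (nopClient) DownloadFile(name string) (string, error) {
	return "/tmp/" + name, nil // hypothetical cache location
}

// Compile-time check that the stub satisfies the trimmed-down interface.
var _ installer.Client = nopClient{}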
(File diff suppressed because it is too large.)

pkg/installer/repository_docker.go (new file, 270 lines)
@@ -0,0 +1,270 @@
|
||||
// Copyright © 2019-2021 Ettore Di Giacinto <mudler@sabayon.org>
|
||||
//
|
||||
// This program is free software; you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation; either version 2 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program; if not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package installer
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
artifact "github.com/mudler/luet/pkg/compiler/types/artifact"
|
||||
|
||||
"github.com/mudler/luet/pkg/compiler/backend"
|
||||
. "github.com/mudler/luet/pkg/logger"
|
||||
pkg "github.com/mudler/luet/pkg/package"
|
||||
|
||||
"github.com/mudler/luet/pkg/bus"
|
||||
compiler "github.com/mudler/luet/pkg/compiler"
|
||||
"github.com/mudler/luet/pkg/config"
|
||||
"github.com/mudler/luet/pkg/helpers"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
type dockerRepositoryGenerator struct {
|
||||
b compiler.CompilerBackend
|
||||
imagePrefix string
|
||||
imagePush, force bool
|
||||
}
|
||||
|
||||
func (l *dockerRepositoryGenerator) Initialize(path string, db pkg.PackageDatabase) ([]*artifact.PackageArtifact, error) {
|
||||
|
||||
Info("Generating docker images for packages in", l.imagePrefix)
|
||||
var art []*artifact.PackageArtifact
|
||||
var ff = func(currentpath string, info os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
Debug("Skipping", info.Name(), err.Error())
|
||||
return nil
|
||||
}
|
||||
if info.IsDir() {
|
||||
Debug("Skipping directories")
|
||||
return nil
|
||||
}
|
||||
|
||||
if !strings.HasSuffix(info.Name(), ".metadata.yaml") {
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := l.pushImageFromArtifact(artifact.NewPackageArtifact(currentpath), l.b); err != nil {
|
||||
return errors.Wrap(err, "while pushing metadata file associated to the artifact")
|
||||
}
|
||||
|
||||
dat, err := ioutil.ReadFile(currentpath)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Error reading file "+currentpath)
|
||||
}
|
||||
|
||||
a, err := artifact.NewPackageArtifactFromYaml(dat)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Error reading yaml "+currentpath)
|
||||
}
|
||||
// Set the path relative to the file.
|
||||
// The metadata contains the full path where the file was located during buildtime.
|
||||
a.Path = filepath.Join(filepath.Dir(currentpath), filepath.Base(a.Path))
|
||||
|
||||
// We want to include packages that are ONLY referenced in the tree.
|
||||
// the ones which aren't should be deleted. (TODO: by another cli command?)
|
||||
if _, notfound := db.FindPackage(a.CompileSpec.Package); notfound != nil {
|
||||
Debug(fmt.Sprintf("Package %s not found in tree. Ignoring it.",
|
||||
a.CompileSpec.Package.HumanReadableString()))
|
||||
return nil
|
||||
}
|
||||
|
||||
packageImage := fmt.Sprintf("%s:%s", l.imagePrefix, a.CompileSpec.GetPackage().ImageID())
|
||||
|
||||
if l.imagePush && l.b.ImageAvailable(packageImage) && !l.force {
|
||||
Info("Image", packageImage, "already present, skipping. use --force-push to override")
|
||||
} else {
|
||||
Info("Generating final image", packageImage,
|
||||
"for package ", a.CompileSpec.GetPackage().HumanReadableString())
|
||||
if opts, err := a.GenerateFinalImage(packageImage, l.b, true); err != nil {
|
||||
return errors.Wrap(err, "Failed generating metadata tree"+opts.ImageName)
|
||||
}
|
||||
}
|
||||
if l.imagePush {
|
||||
if err := pushImage(l.b, packageImage, l.force); err != nil {
|
||||
return errors.Wrapf(err, "Failed while pushing image: '%s'", packageImage)
|
||||
}
|
||||
}
|
||||
|
||||
art = append(art, a)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
err := filepath.Walk(path, ff)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
||||
}
|
||||
return art, nil
|
||||
}
|
||||
|
||||
func pushImage(b compiler.CompilerBackend, image string, force bool) error {
|
||||
if b.ImageAvailable(image) && !force {
|
||||
Debug("Image", image, "already present, skipping")
|
||||
return nil
|
||||
}
|
||||
return b.Push(backend.Options{ImageName: image})
|
||||
}
|
||||
|
||||
func (d *dockerRepositoryGenerator) pushFileFromArtifact(a *artifact.PackageArtifact, imageTree string) error {
|
||||
Debug("Generating image", imageTree)
|
||||
if opts, err := a.GenerateFinalImage(imageTree, d.b, false); err != nil {
|
||||
return errors.Wrap(err, "Failed generating metadata tree "+opts.ImageName)
|
||||
}
|
||||
if d.imagePush {
|
||||
if err := pushImage(d.b, imageTree, true); err != nil {
|
||||
return errors.Wrapf(err, "Failed while pushing image: '%s'", imageTree)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *dockerRepositoryGenerator) pushRepoMetadata(repospec string, r *LuetSystemRepository) error {
|
||||
// create temp dir for metafile
|
||||
metaDir, err := config.LuetCfg.GetSystem().TempDir("metadata")
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Error met while creating tempdir for metadata")
|
||||
}
|
||||
defer os.RemoveAll(metaDir) // clean up
|
||||
|
||||
tempRepoFile := filepath.Join(metaDir, REPOSITORY_SPECFILE+".tar")
|
||||
if err := helpers.Tar(repospec, tempRepoFile); err != nil {
|
||||
return errors.Wrap(err, "Error met while archiving repository file")
|
||||
}
|
||||
|
||||
a := artifact.NewPackageArtifact(tempRepoFile)
|
||||
imageRepo := fmt.Sprintf("%s:%s", d.imagePrefix, REPOSITORY_SPECFILE)
|
||||
|
||||
if err := d.pushFileFromArtifact(a, imageRepo); err != nil {
|
||||
return errors.Wrap(err, "while pushing file from artifact")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *dockerRepositoryGenerator) pushImageFromArtifact(a *artifact.PackageArtifact, b compiler.CompilerBackend) error {
|
||||
// we generate a new archive containing the required compressed file.
|
||||
// TODO: Bundle all the extra files in 1 docker image only, instead of an image for each file
|
||||
treeArchive, err := artifact.CreateArtifactForFile(a.Path)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed generating checksums for tree")
|
||||
}
|
||||
imageTree := fmt.Sprintf("%s:%s", d.imagePrefix, a.GetFileName())
|
||||
|
||||
return d.pushFileFromArtifact(treeArchive, imageTree)
|
||||
}
|
||||
|
||||
// Generate creates a Docker luet repository
|
||||
func (d *dockerRepositoryGenerator) Generate(r *LuetSystemRepository, imagePrefix string, resetRevision bool) error {
|
||||
// - Iterate over meta, build final images, push them if necessary
|
||||
// - while pushing, check if image already exists, and if exist push them only if --force is supplied
|
||||
// - Generate final images for metadata and push
|
||||
|
||||
imageRepository := fmt.Sprintf("%s:%s", imagePrefix, REPOSITORY_SPECFILE)
|
||||
|
||||
r.LastUpdate = strconv.FormatInt(time.Now().Unix(), 10)
|
||||
|
||||
repoTemp, err := config.LuetCfg.GetSystem().TempDir("repo")
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "error met while creating tempdir for repository")
|
||||
}
|
||||
defer os.RemoveAll(repoTemp) // clean up
|
||||
|
||||
if r.GetBackend().ImageAvailable(imageRepository) {
|
||||
if err := r.GetBackend().DownloadImage(backend.Options{ImageName: imageRepository}); err != nil {
|
||||
return errors.Wrapf(err, "while downloading '%s'", imageRepository)
|
||||
}
|
||||
|
||||
if err := r.GetBackend().ExtractRootfs(backend.Options{ImageName: imageRepository, Destination: repoTemp}, false); err != nil {
|
||||
return errors.Wrapf(err, "while extracting '%s'", imageRepository)
|
||||
}
|
||||
}
|
||||
|
||||
repospec := filepath.Join(repoTemp, REPOSITORY_SPECFILE)
|
||||
|
||||
// Increment the internal revision version by reading the one which is already available (if any)
|
||||
if err := r.BumpRevision(repospec, resetRevision); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
Info(fmt.Sprintf(
|
||||
"For repository %s creating revision %d and last update %s...",
|
||||
r.Name, r.Revision, r.LastUpdate,
|
||||
))
|
||||
|
||||
bus.Manager.Publish(bus.EventRepositoryPreBuild, struct {
|
||||
Repo LuetSystemRepository
|
||||
Path string
|
||||
}{
|
||||
Repo: *r,
|
||||
Path: imageRepository,
|
||||
})
|
||||
|
||||
// Create tree and repository file
|
||||
a, err := r.AddTree(r.GetTree(), repoTemp, REPOFILE_TREE_KEY, NewDefaultTreeRepositoryFile())
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "error met while adding runtime tree to repository")
|
||||
}
|
||||
|
||||
// we generate a new archive containing the required compressed file.
|
||||
// TODO: Bundle all the extra files in 1 docker image only, instead of an image for each file
|
||||
if err := d.pushImageFromArtifact(a, d.b); err != nil {
|
||||
return errors.Wrap(err, "error met while pushing runtime tree")
|
||||
}
|
||||
|
||||
a, err = r.AddTree(r.BuildTree, repoTemp, REPOFILE_COMPILER_TREE_KEY, NewDefaultCompilerTreeRepositoryFile())
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "error met while adding compiler tree to repository")
|
||||
}
|
||||
// we generate a new archive containing the required compressed file.
|
||||
// TODO: Bundle all the extra files in 1 docker image only, instead of an image for each file
|
||||
if err := d.pushImageFromArtifact(a, d.b); err != nil {
|
||||
return errors.Wrap(err, "error met while pushing compiler tree")
|
||||
}
|
||||
|
||||
// create temp dir for metafile
|
||||
metaDir, err := config.LuetCfg.GetSystem().TempDir("metadata")
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "error met while creating tempdir for metadata")
|
||||
}
|
||||
defer os.RemoveAll(metaDir) // clean up
|
||||
|
||||
a, err = r.AddMetadata(repospec, metaDir)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed adding Metadata file to repository")
|
||||
}
|
||||
|
||||
if err := d.pushImageFromArtifact(a, d.b); err != nil {
|
||||
return errors.Wrap(err, "error met while pushing docker image from artifact")
|
||||
}
|
||||
|
||||
if err := d.pushRepoMetadata(repospec, r); err != nil {
|
||||
return errors.Wrap(err, "while pushing repository metadata tree")
|
||||
}
|
||||
|
||||
bus.Manager.Publish(bus.EventRepositoryPostBuild, struct {
|
||||
Repo LuetSystemRepository
|
||||
Path string
|
||||
}{
|
||||
Repo: *r,
|
||||
Path: imagePrefix,
|
||||
})
|
||||
return nil
|
||||
}
|
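Generate above assembles one OCI image per repository component and per package. The resulting tag layout, with an assumed image prefix; the concrete tag names are the ones asserted in the repository tests later in this diff:

package sketch

import "fmt"

func repositoryTags() []string {
	prefix := "quay.io/example/myrepo" // assumption: whatever prefix is handed to Generate
	return []string{
		fmt.Sprintf("%s:%s", prefix, "repository.yaml"),          // repository spec (REPOSITORY_SPECFILE)
		fmt.Sprintf("%s:%s", prefix, "repository.meta.yaml.tar"), // artifact metadata index
		fmt.Sprintf("%s:%s", prefix, "tree.tar.gz"),              // runtime tree
		fmt.Sprintf("%s:%s", prefix, "a-test-1.99"),              // one image per package (ImageID())
	}
}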
pkg/installer/repository_local.go (new file, 132 lines)
@@ -0,0 +1,132 @@
|
||||
// Copyright © 2019-2021 Ettore Di Giacinto <mudler@sabayon.org>
|
||||
//
|
||||
// This program is free software; you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation; either version 2 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program; if not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package installer
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
artifact "github.com/mudler/luet/pkg/compiler/types/artifact"
|
||||
. "github.com/mudler/luet/pkg/logger"
|
||||
pkg "github.com/mudler/luet/pkg/package"
|
||||
|
||||
"github.com/mudler/luet/pkg/bus"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
type localRepositoryGenerator struct{}
|
||||
|
||||
func (l *localRepositoryGenerator) Initialize(path string, db pkg.PackageDatabase) ([]*artifact.PackageArtifact, error) {
|
||||
return buildPackageIndex(path, db)
|
||||
}
|
||||
|
||||
func buildPackageIndex(path string, db pkg.PackageDatabase) ([]*artifact.PackageArtifact, error) {
|
||||
|
||||
var art []*artifact.PackageArtifact
|
||||
var ff = func(currentpath string, info os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
Debug("Failed walking", err.Error())
|
||||
return err
|
||||
}
|
||||
|
||||
if !strings.HasSuffix(info.Name(), ".metadata.yaml") {
|
||||
return nil // Skip with no errors
|
||||
}
|
||||
|
||||
dat, err := ioutil.ReadFile(currentpath)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Error reading file "+currentpath)
|
||||
}
|
||||
|
||||
a, err := artifact.NewPackageArtifactFromYaml(dat)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Error reading yaml "+currentpath)
|
||||
}
|
||||
|
||||
// We want to include packages that are ONLY referenced in the tree.
|
||||
// the ones which aren't should be deleted. (TODO: by another cli command?)
|
||||
if _, notfound := db.FindPackage(a.CompileSpec.GetPackage()); notfound != nil {
|
||||
Debug(fmt.Sprintf("Package %s not found in tree. Ignoring it.",
|
||||
a.CompileSpec.GetPackage().HumanReadableString()))
|
||||
return nil
|
||||
}
|
||||
|
||||
art = append(art, a)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
err := filepath.Walk(path, ff)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
||||
}
|
||||
return art, nil
|
||||
}
|
||||
|
||||
// Generate creates a Local luet repository
|
||||
func (*localRepositoryGenerator) Generate(r *LuetSystemRepository, dst string, resetRevision bool) error {
|
||||
err := os.MkdirAll(dst, os.ModePerm)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r.LastUpdate = strconv.FormatInt(time.Now().Unix(), 10)
|
||||
|
||||
repospec := filepath.Join(dst, REPOSITORY_SPECFILE)
|
||||
// Increment the internal revision version by reading the one which is already available (if any)
|
||||
if err := r.BumpRevision(repospec, resetRevision); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
Info(fmt.Sprintf(
|
||||
"Repository %s: creating revision %d and last update %s...",
|
||||
r.Name, r.Revision, r.LastUpdate,
|
||||
))
|
||||
|
||||
bus.Manager.Publish(bus.EventRepositoryPreBuild, struct {
|
||||
Repo LuetSystemRepository
|
||||
Path string
|
||||
}{
|
||||
Repo: *r,
|
||||
Path: dst,
|
||||
})
|
||||
|
||||
if _, err := r.AddTree(r.GetTree(), dst, REPOFILE_TREE_KEY, NewDefaultTreeRepositoryFile()); err != nil {
|
||||
return errors.Wrap(err, "error met while adding runtime tree to repository")
|
||||
}
|
||||
|
||||
if _, err := r.AddTree(r.BuildTree, dst, REPOFILE_COMPILER_TREE_KEY, NewDefaultCompilerTreeRepositoryFile()); err != nil {
|
||||
return errors.Wrap(err, "error met while adding compiler tree to repository")
|
||||
}
|
||||
|
||||
if _, err := r.AddMetadata(repospec, dst); err != nil {
|
||||
return errors.Wrap(err, "failed adding Metadata file to repository")
|
||||
}
|
||||
|
||||
bus.Manager.Publish(bus.EventRepositoryPostBuild, struct {
|
||||
Repo LuetSystemRepository
|
||||
Path string
|
||||
}{
|
||||
Repo: *r,
|
||||
Path: dst,
|
||||
})
|
||||
return nil
|
||||
}
|
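The local generator writes the same repository pieces as plain files under dst instead of pushing images. The file names below come from the constants exercised elsewhere in this diff; the compression suffix follows the default tree repository file:

package sketch

// Files expected under dst after (&localRepositoryGenerator{}).Generate(r, dst, false):
var localRepoLayout = []string{
	"repository.yaml",          // REPOSITORY_SPECFILE: name, revision, last update
	"tree.tar.gz",              // runtime tree added via REPOFILE_TREE_KEY
	"repository.meta.yaml.tar", // artifact metadata written by AddMetadata
	// a compiler tree tarball is also written via REPOFILE_COMPILER_TREE_KEY; its file name is not shown in this diff
}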
@@ -1,4 +1,4 @@
|
||||
// Copyright © 2019 Ettore Di Giacinto <mudler@gentoo.org>
|
||||
// Copyright © 2019 Ettore Di Giacinto <mudler@sabayon.org>
|
||||
//
|
||||
// This program is free software; you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
@@ -26,18 +26,19 @@ import (
|
||||
|
||||
"github.com/mudler/luet/pkg/compiler"
|
||||
backend "github.com/mudler/luet/pkg/compiler/backend"
|
||||
compilerspec "github.com/mudler/luet/pkg/compiler/types/spec"
|
||||
|
||||
artifact "github.com/mudler/luet/pkg/compiler/types/artifact"
|
||||
config "github.com/mudler/luet/pkg/config"
|
||||
"github.com/mudler/luet/pkg/helpers"
|
||||
"github.com/mudler/luet/pkg/installer"
|
||||
. "github.com/mudler/luet/pkg/installer"
|
||||
pkg "github.com/mudler/luet/pkg/package"
|
||||
"github.com/mudler/luet/pkg/solver"
|
||||
"github.com/mudler/luet/pkg/tree"
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
func dockerStubRepo(tmpdir, tree, image string, push, force bool) (installer.Repository, error) {
|
||||
func dockerStubRepo(tmpdir, tree, image string, push, force bool) (*LuetSystemRepository, error) {
|
||||
return GenerateRepository(
|
||||
"test",
|
||||
"description",
|
||||
@@ -46,7 +47,7 @@ func dockerStubRepo(tmpdir, tree, image string, push, force bool) (installer.Rep
|
||||
1,
|
||||
tmpdir,
|
||||
[]string{tree},
|
||||
pkg.NewInMemoryDatabase(false), backend.NewSimpleDockerBackend(), image, push, force)
|
||||
pkg.NewInMemoryDatabase(false), backend.NewSimpleDockerBackend(), image, push, force, false, nil)
|
||||
}
|
||||
|
||||
var _ = Describe("Repository", func() {
|
||||
@@ -64,7 +65,7 @@ var _ = Describe("Repository", func() {
|
||||
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(3))
|
||||
|
||||
compiler := compiler.NewLuetCompiler(backend.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), compiler.NewDefaultCompilerOptions(), solver.Options{Type: solver.SingleCoreSimple})
|
||||
compiler := compiler.NewLuetCompiler(backend.NewSimpleDockerBackend(), generalRecipe.GetDatabase())
|
||||
|
||||
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
@@ -79,12 +80,11 @@ var _ = Describe("Repository", func() {
|
||||
Expect(spec.GetPreBuildSteps()).To(Equal([]string{"echo foo > /test", "echo bar > /test2"}))
|
||||
|
||||
spec.SetOutputPath(tmpdir)
|
||||
compiler.SetConcurrency(1)
|
||||
|
||||
artifact, err := compiler.Compile(false, spec)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(artifact.GetPath())).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.GetPath(), tmpdir, false)).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(artifact.Path)).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.Path, tmpdir, false)).ToNot(HaveOccurred())
|
||||
|
||||
Expect(helpers.Exists(spec.Rel("test5"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("test6"))).To(BeTrue())
|
||||
@@ -132,11 +132,11 @@ var _ = Describe("Repository", func() {
|
||||
Expect(len(generalRecipe2.GetDatabase().GetPackages())).To(Equal(1))
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(3))
|
||||
|
||||
compiler2 := compiler.NewLuetCompiler(backend.NewSimpleDockerBackend(), generalRecipe2.GetDatabase(), compiler.NewDefaultCompilerOptions(), solver.Options{Type: solver.SingleCoreSimple})
|
||||
compiler2 := compiler.NewLuetCompiler(backend.NewSimpleDockerBackend(), generalRecipe2.GetDatabase())
|
||||
spec2, err := compiler2.FromPackage(&pkg.DefaultPackage{Name: "alpine", Category: "seed", Version: "1.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
compiler := compiler.NewLuetCompiler(backend.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), compiler.NewDefaultCompilerOptions(), solver.Options{Type: solver.SingleCoreSimple})
|
||||
compiler := compiler.NewLuetCompiler(backend.NewSimpleDockerBackend(), generalRecipe.GetDatabase())
|
||||
|
||||
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
@@ -153,18 +153,16 @@ var _ = Describe("Repository", func() {
|
||||
|
||||
spec.SetOutputPath(tmpdir)
|
||||
spec2.SetOutputPath(tmpdir)
|
||||
compiler.SetConcurrency(1)
|
||||
compiler2.SetConcurrency(1)
|
||||
|
||||
artifact, err := compiler.Compile(false, spec)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(artifact.GetPath())).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.GetPath(), tmpdir, false)).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(artifact.Path)).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.Path, tmpdir, false)).ToNot(HaveOccurred())
|
||||
|
||||
artifact2, err := compiler2.Compile(false, spec2)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(artifact2.GetPath())).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact2.GetPath(), tmpdir, false)).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(artifact2.Path)).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact2.Path, tmpdir, false)).ToNot(HaveOccurred())
|
||||
|
||||
Expect(helpers.Exists(spec.Rel("test5"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("test6"))).To(BeTrue())
|
||||
@@ -254,7 +252,7 @@ urls:
|
||||
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(3))
|
||||
|
||||
localcompiler := compiler.NewLuetCompiler(backend.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), compiler.NewDefaultCompilerOptions(), solver.Options{Type: solver.SingleCoreSimple})
|
||||
localcompiler := compiler.NewLuetCompiler(backend.NewSimpleDockerBackend(), generalRecipe.GetDatabase())
|
||||
|
||||
spec, err := localcompiler.FromPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
@@ -266,12 +264,11 @@ urls:
|
||||
defer os.RemoveAll(tmpdir) // clean up
|
||||
|
||||
spec.SetOutputPath(tmpdir)
|
||||
localcompiler.SetConcurrency(1)
|
||||
|
||||
artifact, err := localcompiler.Compile(false, spec)
|
||||
a, err := localcompiler.Compile(false, spec)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(artifact.GetPath())).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.GetPath(), tmpdir, false)).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(a.Path)).To(BeTrue())
|
||||
Expect(helpers.Untar(a.Path, tmpdir, false)).ToNot(HaveOccurred())
|
||||
|
||||
Expect(helpers.Exists(spec.Rel("test5"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("test6"))).To(BeTrue())
|
||||
@@ -300,9 +297,9 @@ urls:
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(helpers.Read(f)).To(ContainSubstring("name: test"))
|
||||
|
||||
a, err := c.DownloadArtifact(&compiler.PackageArtifact{
|
||||
a, err = c.DownloadArtifact(&artifact.PackageArtifact{
|
||||
Path: "test.tar",
|
||||
CompileSpec: &compiler.LuetCompilationSpec{
|
||||
CompileSpec: &compilerspec.LuetCompilationSpec{
|
||||
Package: &pkg.DefaultPackage{
|
||||
Name: "b",
|
||||
Category: "test",
|
||||
@@ -315,5 +312,156 @@ urls:
|
||||
Expect(a.Unpack(extracted, false)).ToNot(HaveOccurred())
|
||||
Expect(helpers.Read(filepath.Join(extracted, "test6"))).To(Equal("artifact6\n"))
|
||||
})
|
||||
|
||||
It("generates images of virtual packages", func() {
|
||||
b := backend.NewSimpleDockerBackend()
|
||||
tmpdir, err := ioutil.TempDir("", "tree")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
defer os.RemoveAll(tmpdir) // clean up
|
||||
|
||||
generalRecipe := tree.NewCompilerRecipe(pkg.NewInMemoryDatabase(false))
|
||||
|
||||
err = generalRecipe.Load("../../tests/fixtures/virtuals")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(5))
|
||||
|
||||
localcompiler := compiler.NewLuetCompiler(backend.NewSimpleDockerBackend(), generalRecipe.GetDatabase())
|
||||
|
||||
spec, err := localcompiler.FromPackage(&pkg.DefaultPackage{Name: "a", Category: "test", Version: "1.99"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(spec.GetPackage().GetPath()).ToNot(Equal(""))
|
||||
|
||||
tmpdir, err = ioutil.TempDir("", "tree")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
defer os.RemoveAll(tmpdir) // clean up
|
||||
|
||||
spec.SetOutputPath(tmpdir)
|
||||
|
||||
a, err := localcompiler.Compile(false, spec)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(a.Path)).To(BeTrue())
|
||||
Expect(helpers.Untar(a.Path, tmpdir, false)).ToNot(HaveOccurred())
|
||||
|
||||
repo, err := dockerStubRepo(tmpdir, "../../tests/fixtures/virtuals", repoImage, true, true)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(repo.GetName()).To(Equal("test"))
|
||||
Expect(helpers.Exists(spec.Rel(REPOSITORY_SPECFILE))).ToNot(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel(TREE_TARBALL + ".gz"))).ToNot(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).ToNot(BeTrue())
|
||||
err = repo.Write(repoImage, false, true)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(b.ImageAvailable(fmt.Sprintf("%s:%s", repoImage, "tree.tar.gz"))).To(BeTrue())
|
||||
Expect(b.ImageAvailable(fmt.Sprintf("%s:%s", repoImage, "repository.meta.yaml.tar"))).To(BeTrue())
|
||||
Expect(b.ImageAvailable(fmt.Sprintf("%s:%s", repoImage, "repository.yaml"))).To(BeTrue())
|
||||
Expect(b.ImageAvailable(fmt.Sprintf("%s:%s", repoImage, "a-test-1.99"))).To(BeTrue())
|
||||
|
||||
extracted, err := ioutil.TempDir("", "extracted")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
defer os.RemoveAll(extracted) // clean up
|
||||
|
||||
c := repo.Client()
|
||||
|
||||
f, err := c.DownloadFile("repository.yaml")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(helpers.Read(f)).To(ContainSubstring("name: test"))
|
||||
|
||||
a, err = c.DownloadArtifact(&artifact.PackageArtifact{
|
||||
Path: "test.tar",
|
||||
CompileSpec: &compilerspec.LuetCompilationSpec{
|
||||
Package: &pkg.DefaultPackage{
|
||||
Name: "a",
|
||||
Category: "test",
|
||||
Version: "1.99",
|
||||
},
|
||||
},
|
||||
})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(a.Unpack(extracted, false)).ToNot(HaveOccurred())
|
||||
|
||||
Expect(helpers.DirectoryIsEmpty(extracted)).To(BeFalse())
|
||||
content, err := ioutil.ReadFile(filepath.Join(extracted, ".virtual"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(string(content)).To(Equal(""))
|
||||
})
|
||||
|
||||
It("Searches files", func() {
|
||||
repos := Repositories{
|
||||
&LuetSystemRepository{
|
||||
Index: compiler.ArtifactIndex{
|
||||
&artifact.PackageArtifact{
|
||||
CompileSpec: &compilerspec.LuetCompilationSpec{
|
||||
Package: &pkg.DefaultPackage{},
|
||||
},
|
||||
Path: "bar",
|
||||
Files: []string{"boo"},
|
||||
},
|
||||
&artifact.PackageArtifact{
|
||||
Path: "d",
|
||||
Files: []string{"baz"},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
matches := repos.SearchPackages("bo", FileSearch)
|
||||
Expect(len(matches)).To(Equal(1))
|
||||
Expect(matches[0].Artifact.Path).To(Equal("bar"))
|
||||
})
|
||||
|
||||
It("Searches packages", func() {
|
||||
repo := &LuetSystemRepository{
|
||||
Index: compiler.ArtifactIndex{
|
||||
&artifact.PackageArtifact{
|
||||
Path: "foo",
|
||||
CompileSpec: &compilerspec.LuetCompilationSpec{
|
||||
Package: &pkg.DefaultPackage{
|
||||
Name: "foo",
|
||||
Category: "bar",
|
||||
Version: "1.0",
|
||||
},
|
||||
},
|
||||
},
|
||||
&artifact.PackageArtifact{
|
||||
Path: "baz",
|
||||
CompileSpec: &compilerspec.LuetCompilationSpec{
|
||||
Package: &pkg.DefaultPackage{
|
||||
Name: "foo",
|
||||
Category: "baz",
|
||||
Version: "1.0",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
a, err := repo.SearchArtefact(&pkg.DefaultPackage{
|
||||
Name: "foo",
|
||||
Category: "baz",
|
||||
Version: "1.0",
|
||||
})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(a.Path).To(Equal("baz"))
|
||||
|
||||
a, err = repo.SearchArtefact(&pkg.DefaultPackage{
|
||||
Name: "foo",
|
||||
Category: "bar",
|
||||
Version: "1.0",
|
||||
})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(a.Path).To(Equal("foo"))
|
||||
|
||||
// Doesn't exist. so must fail
|
||||
_, err = repo.SearchArtefact(&pkg.DefaultPackage{
|
||||
Name: "foo",
|
||||
Category: "bar",
|
||||
Version: "1.1",
|
||||
})
|
||||
Expect(err).To(HaveOccurred())
|
||||
})
|
||||
})
|
||||
})
|
||||
|
@@ -24,7 +24,7 @@ func (s *System) ExecuteFinalizers(packs []pkg.Package) error {
|
||||
executedFinalizer := map[string]bool{}
|
||||
for _, p := range packs {
|
||||
if helpers.Exists(p.Rel(tree.FinalizerFile)) {
|
||||
out, err := helpers.RenderFiles(p.Rel(tree.FinalizerFile), p.Rel(tree.DefinitionFile), "")
|
||||
out, err := helpers.RenderFiles(p.Rel(tree.FinalizerFile), p.Rel(tree.DefinitionFile))
|
||||
if err != nil {
|
||||
Warning("Failed rendering finalizer for ", p.HumanReadableString(), err.Error())
|
||||
errs = multierror.Append(errs, err)
|
||||
|
@@ -8,6 +8,7 @@ import (
|
||||
"runtime"
|
||||
"strings"
|
||||
|
||||
"sync"
|
||||
. "github.com/mudler/luet/pkg/config"
|
||||
|
||||
"github.com/briandowns/spinner"
|
||||
@@ -20,7 +21,7 @@ import (
|
||||
var s *spinner.Spinner = nil
|
||||
var z *zap.Logger = nil
|
||||
var aurora Aurora = nil
|
||||
|
||||
var spinnerLock = sync.Mutex{}
|
||||
func NewSpinner() {
|
||||
if s == nil {
|
||||
s = spinner.New(
|
||||
@@ -84,6 +85,8 @@ func ZapLogger() error {
|
||||
}
|
||||
|
||||
func Spinner(i int) {
|
||||
spinnerLock.Lock()
|
||||
defer spinnerLock.Unlock()
|
||||
var confLevel int
|
||||
if LuetCfg.GetGeneral().Debug {
|
||||
confLevel = 3
|
||||
@@ -120,6 +123,8 @@ func SpinnerText(suffix, prefix string) {
|
||||
}
|
||||
|
||||
func SpinnerStop() {
|
||||
spinnerLock.Lock()
|
||||
defer spinnerLock.Unlock()
|
||||
var confLevel int
|
||||
if LuetCfg.GetGeneral().Debug {
|
||||
confLevel = 3
|
||||
@@ -173,7 +178,7 @@ func level2AtomicLevel(level string) zap.AtomicLevel {
|
||||
}
|
||||
}
|
||||
|
||||
func msg(level string, withoutColor bool, msg ...interface{}) {
|
||||
func Msg(level string, withoutColor, ln bool, msg ...interface{}) {
|
||||
var message string
|
||||
var confLevel, msgLevel int
|
||||
|
||||
@@ -219,11 +224,16 @@ func msg(level string, withoutColor bool, msg ...interface{}) {
|
||||
log2File(level, message)
|
||||
}
|
||||
|
||||
fmt.Println(levelMsg)
|
||||
if ln {
|
||||
fmt.Println(levelMsg)
|
||||
} else {
|
||||
fmt.Print(levelMsg)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func Warning(mess ...interface{}) {
|
||||
msg("warning", false, mess...)
|
||||
Msg("warning", false, true, mess...)
|
||||
if LuetCfg.GetGeneral().FatalWarns {
|
||||
os.Exit(2)
|
||||
}
|
||||
@@ -235,23 +245,23 @@ func Debug(mess ...interface{}) {
|
||||
mess = append([]interface{}{fmt.Sprintf("DEBUG (%s:#%d:%v)",
|
||||
path.Base(file), line, runtime.FuncForPC(pc).Name())}, mess...)
|
||||
}
|
||||
msg("debug", false, mess...)
|
||||
Msg("debug", false, true, mess...)
|
||||
}
|
||||
|
||||
func DebugC(mess ...interface{}) {
|
||||
msg("debug", true, mess...)
|
||||
Msg("debug", true, true, mess...)
|
||||
}
|
||||
|
||||
func Info(mess ...interface{}) {
|
||||
msg("info", false, mess...)
|
||||
Msg("info", false, true, mess...)
|
||||
}
|
||||
|
||||
func InfoC(mess ...interface{}) {
|
||||
msg("info", true, mess...)
|
||||
Msg("info", true, true, mess...)
|
||||
}
|
||||
|
||||
func Error(mess ...interface{}) {
|
||||
msg("error", false, mess...)
|
||||
Msg("error", false, true, mess...)
|
||||
}
|
||||
|
||||
func Fatal(mess ...interface{}) {
|
||||
|
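The logger refactor exports Msg with an extra ln flag (print with or without a trailing newline), guards the spinner with a mutex, and routes Warning/Debug/Info/Error through the new entry point. A short sketch; the messages are placeholders:

package sketch

import logger "github.com/mudler/luet/pkg/logger"

func progressLine() {
	// Msg(level string, withoutColor, ln bool, msg ...interface{})
	logger.Msg("info", false, false, "downloading ") // ln=false: no trailing newline
	logger.Msg("info", false, true, "done")          // ln=true: behaves like the old msg()
}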
@@ -52,6 +52,7 @@ type PackageSet interface {
|
||||
FindPackageLabel(labelKey string) (Packages, error)
|
||||
FindPackageLabelMatch(pattern string) (Packages, error)
|
||||
FindPackageMatch(pattern string) (Packages, error)
|
||||
FindPackageByFile(pattern string) (Packages, error)
|
||||
}
|
||||
|
||||
type PackageFile struct {
|
||||
|
@@ -436,9 +436,9 @@ func (db *BoltDatabase) FindPackageLabel(labelKey string) (Packages, error) {
func (db *BoltDatabase) FindPackageLabelMatch(pattern string) (Packages, error) {
var ans []Package

re := regexp.MustCompile(pattern)
if re == nil {
return nil, errors.New("Invalid regex " + pattern + "!")
re, err := regexp.Compile(pattern)
if err != nil {
return nil, errors.Wrap(err, "Invalid regex "+pattern+"!")
}

for _, pack := range db.World() {
@@ -450,12 +450,15 @@ func (db *BoltDatabase) FindPackageLabelMatch(pattern string) (Packages, error)
return Packages(ans), nil
}

func (db *BoltDatabase) FindPackageByFile(pattern string) (Packages, error) {
return findPackageByFile(db, pattern)
}
func (db *BoltDatabase) FindPackageMatch(pattern string) (Packages, error) {
var ans []Package

re := regexp.MustCompile(pattern)
if re == nil {
return nil, errors.New("Invalid regex " + pattern + "!")
re, err := regexp.Compile(pattern)
if err != nil {
return nil, errors.Wrap(err, "Invalid regex "+pattern+"!")
}

for _, pack := range db.World() {
@@ -57,6 +57,31 @@ var _ = Describe("BoltDB Database", func() {

})

It("Find package files", func() {
a := NewPackage("A", "1.0", []*DefaultPackage{}, []*DefaultPackage{})
a1 := NewPackage("A", "1.1", []*DefaultPackage{}, []*DefaultPackage{})
a3 := NewPackage("A", "1.3", []*DefaultPackage{}, []*DefaultPackage{})
_, err := db.CreatePackage(a)
Expect(err).ToNot(HaveOccurred())

_, err = db.CreatePackage(a1)
Expect(err).ToNot(HaveOccurred())

_, err = db.CreatePackage(a3)
Expect(err).ToNot(HaveOccurred())

err = db.SetPackageFiles(&PackageFile{PackageFingerprint: a.GetFingerPrint(), Files: []string{"foo"}})
Expect(err).ToNot(HaveOccurred())

err = db.SetPackageFiles(&PackageFile{PackageFingerprint: a1.GetFingerPrint(), Files: []string{"bar"}})
Expect(err).ToNot(HaveOccurred())

pack, err := db.FindPackageByFile("fo")
Expect(err).ToNot(HaveOccurred())
Expect(len(pack)).To(Equal(1))
Expect(pack[0]).To(Equal(a))
})

It("Expands correctly", func() {

a := NewPackage("A", ">=1.0", []*DefaultPackage{}, []*DefaultPackage{})
@@ -15,7 +15,11 @@

package pkg

import "github.com/pkg/errors"
import (
"regexp"

"github.com/pkg/errors"
)

func clone(src, dst PackageDatabase) error {
for _, i := range src.World() {
@@ -36,3 +40,29 @@ func copy(src PackageDatabase) (PackageDatabase, error) {

return dst, nil
}

func findPackageByFile(db PackageDatabase, pattern string) (Packages, error) {

var ans []Package

re, err := regexp.Compile(pattern)
if err != nil {
return nil, errors.Wrap(err, "Invalid regex "+pattern+"!")
}

PACKAGE:
for _, pack := range db.World() {
files, err := db.GetPackageFiles(pack)
if err == nil {
for _, f := range files {
if re.MatchString(f) {
ans = append(ans, pack)
continue PACKAGE
}
}
}
}

return Packages(ans), nil

}
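Editor's note: the new FindPackageByFile entry in the PackageSet interface and the shared findPackageByFile helper above answer "which package owns this file?": the argument is compiled as a regular expression and matched against every file recorded for every package in the database. A minimal usage sketch, assuming the in-memory database API exercised by the tests below (the import path and alias are assumptions based on this repository's layout; main() is purely illustrative):

package main

import (
	"fmt"

	pkg "github.com/mudler/luet/pkg/package"
)

func main() {
	db := pkg.NewInMemoryDatabase(false)
	a := pkg.NewPackage("A", "1.0", []*pkg.DefaultPackage{}, []*pkg.DefaultPackage{})
	db.CreatePackage(a)

	// Record the files shipped by the package, as the installer does.
	db.SetPackageFiles(&pkg.PackageFile{PackageFingerprint: a.GetFingerPrint(), Files: []string{"/usr/bin/foo"}})

	// Any valid regular expression works; an invalid one now returns an error
	// instead of panicking, thanks to the switch from MustCompile to Compile.
	owners, err := db.FindPackageByFile("foo$")
	if err == nil && len(owners) > 0 {
		fmt.Println("owned by", owners[0].HumanReadableString())
	}
}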
@@ -32,6 +32,7 @@ var DBInMemoryInstance = &InMemoryDatabase{
CacheNoVersion: map[string]map[string]interface{}{},
ProvidesDatabase: map[string]map[string]Package{},
RevDepsDatabase: map[string]map[string]Package{},
cached: map[string]interface{}{},
}

type InMemoryDatabase struct {
@@ -199,6 +200,10 @@ func (db *InMemoryDatabase) populateCaches(p Package) {

// Create extra cache between package -> []versions
db.Lock()
if db.cached == nil {
db.cached = map[string]interface{}{}
}

if _, ok := db.cached[p.GetFingerPrint()]; ok {
db.Unlock()
return
@@ -555,3 +560,7 @@ func (db *InMemoryDatabase) FindPackageMatch(pattern string) (Packages, error) {

return Packages(ans), nil
}

func (db *InMemoryDatabase) FindPackageByFile(pattern string) (Packages, error) {
return findPackageByFile(db, pattern)
}
@@ -77,6 +77,32 @@ var _ = Describe("Database", func() {

})

It("Find package files", func() {
db := NewInMemoryDatabase(false)
a := NewPackage("A", "1.0", []*DefaultPackage{}, []*DefaultPackage{})
a1 := NewPackage("A", "1.1", []*DefaultPackage{}, []*DefaultPackage{})
a3 := NewPackage("A", "1.3", []*DefaultPackage{}, []*DefaultPackage{})
_, err := db.CreatePackage(a)
Expect(err).ToNot(HaveOccurred())

_, err = db.CreatePackage(a1)
Expect(err).ToNot(HaveOccurred())

_, err = db.CreatePackage(a3)
Expect(err).ToNot(HaveOccurred())

err = db.SetPackageFiles(&PackageFile{PackageFingerprint: a.GetFingerPrint(), Files: []string{"foo"}})
Expect(err).ToNot(HaveOccurred())

err = db.SetPackageFiles(&PackageFile{PackageFingerprint: a1.GetFingerPrint(), Files: []string{"bar"}})
Expect(err).ToNot(HaveOccurred())

pack, err := db.FindPackageByFile("fo")
Expect(err).ToNot(HaveOccurred())
Expect(len(pack)).To(Equal(1))
Expect(pack[0]).To(Equal(a))
})

It("Find specific package candidate", func() {
db := NewInMemoryDatabase(false)
a := NewPackage("A", "1.0", []*DefaultPackage{}, []*DefaultPackage{})
@@ -46,7 +46,7 @@ type Package interface {

GetFingerPrint() string
GetPackageName() string
GetPackageImageName() string
ImageID() string
Requires([]*DefaultPackage) Package
Conflicts([]*DefaultPackage) Package
Revdeps(PackageDatabase) Packages
@@ -113,6 +113,12 @@ type Package interface {
GetBuildTimestamp() string

Clone() Package

GetMetadataFilePath() string
SetTreeDir(s string)
GetTreeDir() string

JSON() ([]byte, error)
}

type Tree interface {
@@ -210,6 +216,11 @@ func (t *DefaultPackage) JSON() ([]byte, error) {
return buffer.Bytes(), err
}

// GetMetadataFilePath returns the canonical name of an artifact metadata file
func (d *DefaultPackage) GetMetadataFilePath() string {
return d.GetFingerPrint() + ".metadata.yaml"
}

// DefaultPackage represent a standard package definition
type DefaultPackage struct {
ID int `storm:"id,increment" json:"id"` // primary key with auto increment
@@ -235,6 +246,8 @@ type DefaultPackage struct {
BuildTimestamp string `json:"buildtimestamp,omitempty"`

Labels map[string]string `json:"labels,omitempty"` // Affects YAML field names too.

TreeDir string `json:"treedir,omitempty"`
}

// State represent the package state
@@ -251,6 +264,12 @@ func NewPackage(name, version string, requires []*DefaultPackage, conflicts []*D
}
}

func (p *DefaultPackage) SetTreeDir(s string) {
p.TreeDir = s
}
func (p *DefaultPackage) GetTreeDir() string {
return p.TreeDir
}
func (p *DefaultPackage) String() string {
b, err := p.JSON()
if err != nil {
@@ -289,8 +308,8 @@ func (p *DefaultPackage) GetPackageName() string {
return fmt.Sprintf("%s-%s", p.Name, p.Category)
}

func (p *DefaultPackage) GetPackageImageName() string {
return fmt.Sprintf("%s-%s:%s", p.Name, p.Category, p.Version)
func (p *DefaultPackage) ImageID() string {
return strings.ReplaceAll(p.GetFingerPrint(), "+", "-")
}

// GetBuildTimestamp returns the package build timestamp
@@ -66,6 +66,17 @@ var _ = Describe("Package", func() {
})
})

Context("ImageID", func() {
It("Returns a correct ImageID escaping unsupported chars", func() {
p := NewPackage("A", "1.0+p1", []*DefaultPackage{}, []*DefaultPackage{})
Expect(p.ImageID()).To(Equal("A--1.0-p1"))
})
It("Returns a correct ImageID", func() {
p := NewPackage("A", "1.0", []*DefaultPackage{}, []*DefaultPackage{})
Expect(p.ImageID()).To(Equal("A--1.0"))
})
})

Context("Find label on packages", func() {
a := NewPackage("A", ">=1.0", []*DefaultPackage{}, []*DefaultPackage{})
a.AddLabel("project1", "test1")
@@ -57,6 +57,17 @@ type CompilerRecipe struct {
Recipe
}

// CompilerRecipes copies tree 1:1 as they contain the specs
// and the build context required for reproducible builds
func (r *CompilerRecipe) Save(path string) error {
for _, p := range r.SourcePath {
if err := helpers.CopyDir(p, filepath.Join(path, filepath.Base(p))); err != nil {
return errors.Wrap(err, "while copying source tree")
}
}
return nil
}

func (r *CompilerRecipe) Load(path string) error {

r.SourcePath = append(r.SourcePath, path)
@@ -87,12 +98,13 @@ func (r *CompilerRecipe) Load(path string) error {
}
// Path is set only internally when tree is loaded from disk
pack.SetPath(filepath.Dir(currentpath))
pack.SetTreeDir(path)

// Instead of rdeps, have a different tree for build deps.
compileDefPath := pack.Rel(CompilerDefinitionFile)
if helpers.Exists(compileDefPath) {

dat, err := helpers.RenderFiles(compileDefPath, currentpath, "")
dat, err := helpers.RenderFiles(compileDefPath, currentpath)
if err != nil {
return errors.Wrap(err,
"Error templating file "+CompilerDefinitionFile+" from "+
@@ -115,18 +127,25 @@ func (r *CompilerRecipe) Load(path string) error {
}

case CollectionFile:

dat, err := ioutil.ReadFile(currentpath)
if err != nil {
return errors.Wrap(err, "Error reading file "+currentpath)
}

packs, err := pkg.DefaultPackagesFromYaml(dat)
if err != nil {
return errors.Wrap(err, "Error reading yaml "+currentpath)
}

packsRaw, err := pkg.GetRawPackages(dat)
if err != nil {
return errors.Wrap(err, "Error reading raw packages from "+currentpath)
}

for _, pack := range packs {
pack.SetPath(filepath.Dir(currentpath))
pack.SetTreeDir(path)

// Instead of rdeps, have a different tree for build deps.
compileDefPath := pack.Rel(CompilerDefinitionFile)
@@ -159,9 +178,7 @@ func (r *CompilerRecipe) Load(path string) error {
return errors.Wrap(err, "Error creating package "+pack.GetName())
}
}

}

return nil
}
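Editor's note: with the Save method above, a build tree can be snapshotted next to its output: Load records every source path it was fed in SourcePath, and Save copies those directories 1:1 so the exact specs and build context can be replayed later. A minimal sketch under stated assumptions (the pkg/tree import path mirrors this repository's layout, the package name and snapshotTree helper are hypothetical, and constructing the CompilerRecipe itself is left out):

package snapshot

import "github.com/mudler/luet/pkg/tree"

// snapshotTree loads the package tree rooted at src and then copies it
// 1:1 into dst, which is what Save does with every path recorded by Load.
func snapshotTree(r *tree.CompilerRecipe, src, dst string) error {
	if err := r.Load(src); err != nil {
		return err
	}
	return r.Save(dst)
}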
8  tests/fixtures/owners/delta/build.yaml  vendored  Normal file
@@ -0,0 +1,8 @@
image: "busybox"

steps:
- adduser foo -D
- addgroup bar
- touch /foo
- chown foo:bar /foo
- chmod 500 /foo

3  tests/fixtures/owners/delta/definition.yaml  vendored  Normal file
@@ -0,0 +1,3 @@
category: "test"
name: "delta"
version: "1.0"
13  tests/fixtures/owners/unpack/build.yaml  vendored  Normal file
@@ -0,0 +1,13 @@
image: "busybox"

steps:
- adduser baz -D
- addgroup ba
- touch /bar
- chown baz:ba /bar
- chmod 600 /bar
- ls -liah /bar
unpack: true

includes:
- bar

3  tests/fixtures/owners/unpack/definition.yaml  vendored  Normal file
@@ -0,0 +1,3 @@
category: "test"
name: "unpack"
version: "1.0"
128  tests/integration/01_rootfs.sh  Executable file
@@ -0,0 +1,128 @@
#!/bin/bash

export LUET_NOLOCK=true

oneTimeSetUp() {
export tmpdir="$(mktemp -d)"
}

oneTimeTearDown() {
rm -rf "$tmpdir"
}

testBuild() {
mkdir $tmpdir/testbuild
luet build --tree "$ROOT_DIR/tests/fixtures/buildableseed" --destination $tmpdir/testbuild --compression gzip test/c > /dev/null
buildst=$?
assertEquals 'builds successfully' "$buildst" "0"
assertTrue 'create package dep B' "[ -e '$tmpdir/testbuild/b-test-1.0.package.tar.gz' ]"
assertTrue 'create package' "[ -e '$tmpdir/testbuild/c-test-1.0.package.tar.gz' ]"
}

testRepo() {
assertTrue 'no repository' "[ ! -e '$tmpdir/testbuild/repository.yaml' ]"
luet create-repo --tree "$ROOT_DIR/tests/fixtures/buildableseed" \
--output $tmpdir/testbuild \
--packages $tmpdir/testbuild \
--name "test" \
--descr "Test Repo" \
--urls $tmpdir/testrootfs \
--type disk > /dev/null

createst=$?
assertEquals 'create repo successfully' "$createst" "0"
assertTrue 'create repository' "[ -e '$tmpdir/testbuild/repository.yaml' ]"
}

testConfig() {
mkdir $tmpdir/testrootfs
cat <<EOF > $tmpdir/luet.yaml
general:
debug: true
system:
rootfs: $tmpdir/testrootfs
database_engine: "memory"
config_from_host: true
repositories:
- name: "main"
type: "disk"
enable: true
urls:
- "$tmpdir/testbuild"
EOF
luet config --config $tmpdir/luet.yaml
res=$?
assertEquals 'config test successfully' "$res" "0"
}

testInstall() {
luet install -y --config $tmpdir/luet.yaml test/c
installst=$?
assertEquals 'install test successfully' "$installst" "0"
assertTrue 'package installed' "[ -e '$tmpdir/testrootfs/c' ]"
}

testCleanup() {
luet cleanup --config $tmpdir/luet.yaml
installst=$?
assertEquals 'install test successfully' "$installst" "0"
assertTrue 'package cleaned' "[ ! -e '$tmpdir/testrootfs/packages/c-test-1.0.package.tar.gz' ]"
}

testInstall2() {
luet install -y --config $tmpdir/luet.yaml --system-target $tmpdir/foo test/c
installst=$?
assertEquals 'install test successfully' "$installst" "0"
assertTrue 'db not created' "[ ! -e '$tmpdir/foo/var/cache/luet/luet.db' ]"
assertTrue 'package installed' "[ -e '$tmpdir/foo/c' ]"
}

testCleanup2() {
luet cleanup --config $tmpdir/luet.yaml --system-target $tmpdir/foo
installst=$?
assertEquals 'install test successfully' "$installst" "0"
assertTrue 'package cleaned' "[ ! -e '$tmpdir/foo/packages/c-test-1.0.package.tar.gz' ]"
}

testInstall3() {
cat <<EOF > $tmpdir/luet2.yaml
general:
debug: true
config_from_host: true
repositories:
- name: "main"
type: "disk"
enable: true
urls:
- "$tmpdir/testbuild"
EOF
luet install -y --config $tmpdir/luet2.yaml --system-target $tmpdir/baz test/c
installst=$?
assertEquals 'install test successfully' "$installst" "0"
assertTrue 'package installed' "[ -e '$tmpdir/baz/c' ]"
}

testCleanup3() {
luet cleanup --config $tmpdir/luet2.yaml --system-target $tmpdir/baz
installst=$?
assertEquals 'install test successfully' "$installst" "0"
assertTrue 'package cleaned' "[ ! -e '$tmpdir/baz/packages/c-test-1.0.package.tar.gz' ]"
}

testInstall4() {
luet install -y --config $tmpdir/luet2.yaml --system-target $tmpdir/bad --system-engine boltdb test/c
installst=$?
assertEquals 'install test successfully' "$installst" "0"
assertTrue 'package installed' "[ -e '$tmpdir/bad/c' ]"
assertTrue 'db created' "[ -d '$tmpdir/bad/var/cache/luet' ]"
}

testCleanup4() {
luet cleanup --config $tmpdir/luet2.yaml --system-target $tmpdir/bad --system-engine boltdb
installst=$?
assertEquals 'install test successfully' "$installst" "0"
assertTrue 'package cleaned' "[ ! -e '$tmpdir/bad/packages/c-test-1.0.package.tar.gz' ]"
}
# Load shUnit2.
. "$ROOT_DIR/tests/integration/shunit2"/shunit2
80  tests/integration/01_simple_downloadonly.sh  Executable file
@@ -0,0 +1,80 @@
#!/bin/bash

export LUET_NOLOCK=true

oneTimeSetUp() {
export tmpdir="$(mktemp -d)"
}

oneTimeTearDown() {
rm -rf "$tmpdir"
}

testBuild() {
mkdir $tmpdir/testbuild
luet build --tree "$ROOT_DIR/tests/fixtures/buildableseed" --destination $tmpdir/testbuild --compression gzip test/c > /dev/null
buildst=$?
assertEquals 'builds successfully' "$buildst" "0"
assertTrue 'create package dep B' "[ -e '$tmpdir/testbuild/b-test-1.0.package.tar.gz' ]"
assertTrue 'create package' "[ -e '$tmpdir/testbuild/c-test-1.0.package.tar.gz' ]"
}

testRepo() {
assertTrue 'no repository' "[ ! -e '$tmpdir/testbuild/repository.yaml' ]"
luet create-repo --tree "$ROOT_DIR/tests/fixtures/buildableseed" \
--output $tmpdir/testbuild \
--packages $tmpdir/testbuild \
--name "test" \
--descr "Test Repo" \
--urls $tmpdir/testrootfs \
--type disk > /dev/null

createst=$?
assertEquals 'create repo successfully' "$createst" "0"
assertTrue 'create repository' "[ -e '$tmpdir/testbuild/repository.yaml' ]"
}

testConfig() {
mkdir $tmpdir/testrootfs
cat <<EOF > $tmpdir/luet.yaml
general:
debug: true
system:
rootfs: $tmpdir/testrootfs
database_engine: "memory"
config_from_host: true
repositories:
- name: "main"
type: "disk"
enable: true
urls:
- "$tmpdir/testbuild"
EOF
luet config --config $tmpdir/luet.yaml
res=$?
assertEquals 'config test successfully' "$res" "0"
}

testDownloadOnly() {
luet install -y --download-only --config $tmpdir/luet.yaml test/c > /dev/null
installst=$?
assertEquals 'install test successfully' "$installst" "0"
assertTrue 'package not installed' "[ ! -e '$tmpdir/testrootfs/c' ]"
assertTrue 'cache populated' "[ -e '$tmpdir/testrootfs/var/cache/luet/packages/c-test-1.0.package.tar.gz' ]"

luet install -y --config $tmpdir/luet.yaml test/c > /dev/null
installst=$?
assertEquals 'install test successfully' "$installst" "0"
assertTrue 'package installed' "[ -e '$tmpdir/testrootfs/c' ]"
}

testCleanup() {
luet cleanup --config $tmpdir/luet.yaml > /dev/null
installst=$?
assertEquals 'install test successfully' "$installst" "0"
assertTrue 'package installed' "[ ! -e '$tmpdir/testrootfs/packages/c-test-1.0.package.tar.gz' ]"
}

# Load shUnit2.
. "$ROOT_DIR/tests/integration/shunit2"/shunit2
104  tests/integration/01_simple_http.sh  Executable file
@@ -0,0 +1,104 @@
#!/bin/bash

export LUET_NOLOCK=true
TEST_PORT="${TEST_PORT:-9090}"

oneTimeSetUp() {
export tmpdir="$(mktemp -d)"
}

oneTimeTearDown() {
rm -rf "$tmpdir"
kill '%1' || true
}

testBuild() {
mkdir $tmpdir/testbuild
luet build --tree "$ROOT_DIR/tests/fixtures/buildableseed" --destination $tmpdir/testbuild --compression zstd test/c@1.0 > /dev/null
buildst=$?
assertEquals 'builds successfully' "$buildst" "0"
assertTrue 'create package dep B' "[ -e '$tmpdir/testbuild/b-test-1.0.package.tar.zst' ]"
assertTrue 'create package' "[ -e '$tmpdir/testbuild/c-test-1.0.package.tar.zst' ]"
}

testRepo() {
luet create-repo --tree "$ROOT_DIR/tests/fixtures/buildableseed" \
--output $tmpdir/testbuild \
--packages $tmpdir/testbuild \
--name "test" \
--descr "Test Repo" \
--urls $tmpdir/testrootfs \
--tree-compression zstd \
--tree-filename foo.tar \
--meta-filename repository.meta.tar \
--meta-compression zstd \
--type disk

createst=$?
assertEquals 'create repo successfully' "$createst" "0"

luet serve-repo --dir $tmpdir/testbuild --port $TEST_PORT &
}

testConfig() {
mkdir $tmpdir/testrootfs
cat <<EOF > $tmpdir/luet.yaml
general:
debug: true
system:
rootfs: $tmpdir/testrootfs
database_path: "/"
database_engine: "boltdb"
config_from_host: true
repositories:
- name: "main"
type: "http"
enable: true
urls:
- "http://127.0.0.1:$TEST_PORT"
EOF
luet config --config $tmpdir/luet.yaml
res=$?
assertEquals 'config test successfully' "$res" "0"
}

testInstall() {
luet install -y --config $tmpdir/luet.yaml test/c@1.0
installst=$?
assertEquals 'install test successfully' "$installst" "0"
assertTrue 'package installed' "[ -e '$tmpdir/testrootfs/c' ]"
}

testReInstall() {
output=$(luet install -y --config $tmpdir/luet.yaml test/c@1.0)
installst=$?
assertEquals 'install test successfully' "$installst" "0"
assertContains 'contains warning' "$output" 'No packages to install'
}

testUnInstall() {
luet uninstall -y --config $tmpdir/luet.yaml test/c@1.0
installst=$?
assertEquals 'uninstall test successfully' "$installst" "0"
assertTrue 'package uninstalled' "[ ! -e '$tmpdir/testrootfs/c' ]"
}

testInstallAgain() {
assertTrue 'package uninstalled' "[ ! -e '$tmpdir/testrootfs/c' ]"
output=$(luet install -y --config $tmpdir/luet.yaml test/c@1.0)
installst=$?
assertEquals 'install test successfully' "$installst" "0"
assertNotContains 'contains warning' "$output" 'No packages to install'
assertTrue 'package installed' "[ -e '$tmpdir/testrootfs/c' ]"
assertTrue 'package in cache' "[ -e '$tmpdir/testrootfs/packages/c-test-1.0.package.tar.zst' ]"
}

testCleanup() {
luet cleanup --config $tmpdir/luet.yaml
installst=$?
assertEquals 'cleanup test successfully' "$installst" "0"
}

# Load shUnit2.
. "$ROOT_DIR/tests/integration/shunit2"/shunit2
74  tests/integration/01_simple_inmemory.sh  Executable file
@@ -0,0 +1,74 @@
#!/bin/bash

export LUET_NOLOCK=true

oneTimeSetUp() {
export tmpdir="$(mktemp -d)"
}

oneTimeTearDown() {
rm -rf "$tmpdir"
}

testBuild() {
mkdir $tmpdir/testbuild
luet build --tree "$ROOT_DIR/tests/fixtures/buildableseed" --destination $tmpdir/testbuild --compression gzip test/c > /dev/null
buildst=$?
assertEquals 'builds successfully' "$buildst" "0"
assertTrue 'create package dep B' "[ -e '$tmpdir/testbuild/b-test-1.0.package.tar.gz' ]"
assertTrue 'create package' "[ -e '$tmpdir/testbuild/c-test-1.0.package.tar.gz' ]"
}

testRepo() {
assertTrue 'no repository' "[ ! -e '$tmpdir/testbuild/repository.yaml' ]"
luet create-repo --tree "$ROOT_DIR/tests/fixtures/buildableseed" \
--output $tmpdir/testbuild \
--packages $tmpdir/testbuild \
--name "test" \
--descr "Test Repo" \
--urls $tmpdir/testrootfs \
--type disk > /dev/null

createst=$?
assertEquals 'create repo successfully' "$createst" "0"
assertTrue 'create repository' "[ -e '$tmpdir/testbuild/repository.yaml' ]"
}

testConfig() {
mkdir $tmpdir/testrootfs
cat <<EOF > $tmpdir/luet.yaml
general:
debug: true
system:
rootfs: $tmpdir/testrootfs
database_engine: "memory"
config_from_host: true
repositories:
- name: "main"
type: "disk"
enable: true
urls:
- "$tmpdir/testbuild"
EOF
luet config --config $tmpdir/luet.yaml
res=$?
assertEquals 'config test successfully' "$res" "0"
}

testInstall() {
luet install -y --config $tmpdir/luet.yaml test/c
installst=$?
assertEquals 'install test successfully' "$installst" "0"
assertTrue 'package installed' "[ -e '$tmpdir/testrootfs/c' ]"
}

testCleanup() {
luet cleanup --config $tmpdir/luet.yaml
installst=$?
assertEquals 'install test successfully' "$installst" "0"
assertTrue 'package installed' "[ ! -e '$tmpdir/testrootfs/packages/c-test-1.0.package.tar.gz' ]"
}

# Load shUnit2.
. "$ROOT_DIR/tests/integration/shunit2"/shunit2
@@ -12,6 +12,7 @@ oneTimeTearDown() {

testBuild() {
mkdir $tmpdir/testbuild
[ "$LUET_BACKEND" == "img" ] && startSkipping
luet build --tree "$ROOT_DIR/tests/fixtures/retrieve-integration" --destination $tmpdir/testbuild --compression gzip test/b
buildst=$?
assertEquals 'builds successfully' "$buildst" "0"
@@ -20,6 +21,7 @@ testBuild() {
}

testRepo() {
[ "$LUET_BACKEND" == "img" ] && startSkipping
assertTrue 'no repository' "[ ! -e '$tmpdir/testbuild/repository.yaml' ]"
luet create-repo --tree "$ROOT_DIR/tests/fixtures/retrieve-integration" \
--output $tmpdir/testbuild \
@@ -59,6 +61,7 @@ EOF

testInstall() {
[ "$LUET_BACKEND" == "img" ] && startSkipping
luet install -y --config $tmpdir/luet.yaml test/b
#luet install -y --config $tmpdir/luet.yaml test/c-1.0 > /dev/null
installst=$?
@@ -71,6 +74,7 @@ testInstall() {

testUnInstall() {
[ "$LUET_BACKEND" == "img" ] && startSkipping
luet uninstall -y --full --config $tmpdir/luet.yaml test/b
installst=$?
assertEquals 'uninstall test successfully' "$installst" "0"
@@ -80,6 +84,7 @@ testUnInstall() {

testCleanup() {
[ "$LUET_BACKEND" == "img" ] && startSkipping
luet cleanup --config $tmpdir/luet.yaml
installst=$?
assertEquals 'install test successfully' "$installst" "0"
@@ -95,13 +95,6 @@ testInstall() {
installst=$?
assertEquals 'install test successfully' "$installst" "0"

luet install -y --config $tmpdir/luet.yaml test/a@1.1
assertTrue 'package installed A' "[ -e '$tmpdir/testrootfs/testaa' ]"
installst=$?
assertEquals 'install test successfully' "$installst" "0"
assertTrue 'package keeps old A' "[ -e '$tmpdir/testrootfs/testaa' ]"
assertTrue 'package new A was not installed' "[ ! -e '$tmpdir/testrootfs/testlatest' ]"

luet install -y --config $tmpdir/luet.yaml test/c@1.0
installst=$?
assertEquals 'install test successfully' "$installst" "0"

@@ -84,26 +84,11 @@ EOF
}

testInstall() {
luet install -y --config $tmpdir/luet.yaml test/b@1.0
luet install -y --config $tmpdir/luet.yaml test/b@1.0 test/a@1.0 test/c@1.0
installst=$?
assertEquals 'install test successfully' "$installst" "0"
assertTrue 'package installed B' "[ -e '$tmpdir/testrootfs/test5' ]"

luet install -y --config $tmpdir/luet.yaml test/a@1.0
assertTrue 'package installed A' "[ -e '$tmpdir/testrootfs/testaa' ]"
installst=$?
assertEquals 'install test successfully' "$installst" "0"

luet install -y --config $tmpdir/luet.yaml test/a@1.1
assertTrue 'package installed A' "[ -e '$tmpdir/testrootfs/testaa' ]"
installst=$?
assertEquals 'install test successfully' "$installst" "0"
assertTrue 'package keeps old A' "[ -e '$tmpdir/testrootfs/testaa' ]"
assertTrue 'package new A was not installed' "[ ! -e '$tmpdir/testrootfs/testlatest' ]"

luet install -y --config $tmpdir/luet.yaml test/c@1.0
installst=$?
assertEquals 'install test successfully' "$installst" "0"
assertTrue 'package installed C' "[ -e '$tmpdir/testrootfs/c' ]"
}
128  tests/integration/06_search_files.sh  Executable file
@@ -0,0 +1,128 @@
#!/bin/bash

export LUET_NOLOCK=true
oneTimeSetUp() {
export tmpdir="$(mktemp -d)"
}

oneTimeTearDown() {
rm -rf "$tmpdir"
}

testBuild() {
mkdir $tmpdir/testbuild
luet build --tree "$ROOT_DIR/tests/fixtures/upgrade_integration" --destination $tmpdir/testbuild --compression gzip test/b@1.0
buildst=$?
assertTrue 'create package B 1.0' "[ -e '$tmpdir/testbuild/b-test-1.0.package.tar.gz' ]"
assertEquals 'builds successfully' "$buildst" "0"

luet build --tree "$ROOT_DIR/tests/fixtures/upgrade_integration" --destination $tmpdir/testbuild --compression gzip test/b@1.1
buildst=$?
assertEquals 'builds successfully' "$buildst" "0"
assertTrue 'create package B 1.1' "[ -e '$tmpdir/testbuild/b-test-1.1.package.tar.gz' ]"

luet build --tree "$ROOT_DIR/tests/fixtures/upgrade_integration" --destination $tmpdir/testbuild --compression gzip test/a@1.0
buildst=$?
assertEquals 'builds successfully' "$buildst" "0"
assertTrue 'create package A 1.0' "[ -e '$tmpdir/testbuild/a-test-1.0.package.tar.gz' ]"

luet build --tree "$ROOT_DIR/tests/fixtures/upgrade_integration" --destination $tmpdir/testbuild --compression gzip test/a@1.1
buildst=$?
assertEquals 'builds successfully' "$buildst" "0"

assertTrue 'create package A 1.1' "[ -e '$tmpdir/testbuild/a-test-1.1.package.tar.gz' ]"

luet build --tree "$ROOT_DIR/tests/fixtures/upgrade_integration" --destination $tmpdir/testbuild --compression gzip test/a@1.2
buildst=$?
assertEquals 'builds successfully' "$buildst" "0"

assertTrue 'create package A 1.2' "[ -e '$tmpdir/testbuild/a-test-1.2.package.tar.gz' ]"

luet build --tree "$ROOT_DIR/tests/fixtures/upgrade_integration" --destination $tmpdir/testbuild --compression gzip test/c@1.0
buildst=$?
assertEquals 'builds successfully' "$buildst" "0"
assertTrue 'create package C 1.0' "[ -e '$tmpdir/testbuild/c-test-1.0.package.tar.gz' ]"

}

testRepo() {
assertTrue 'no repository' "[ ! -e '$tmpdir/testbuild/repository.yaml' ]"
luet create-repo --tree "$ROOT_DIR/tests/fixtures/upgrade_integration" \
--output $tmpdir/testbuild \
--packages $tmpdir/testbuild \
--name "test" \
--descr "Test Repo" \
--urls $tmpdir/testrootfs \
--type disk

createst=$?
assertEquals 'create repo successfully' "$createst" "0"
assertTrue 'create repository' "[ -e '$tmpdir/testbuild/repository.yaml' ]"
}

testConfig() {
mkdir $tmpdir/testrootfs
cat <<EOF > $tmpdir/luet.yaml
general:
debug: true
system:
rootfs: $tmpdir/testrootfs
database_path: "/"
database_engine: "boltdb"
config_from_host: true
repositories:
- name: "main"
type: "disk"
enable: true
urls:
- "$tmpdir/testbuild"
EOF
luet config --config $tmpdir/luet.yaml
res=$?
assertEquals 'config test successfully' "$res" "0"
}

testSearch() {
installed=$(luet --config $tmpdir/luet.yaml search --files testaa)
searchst=$?
assertEquals 'search exists successfully' "$searchst" "0"
assertContains 'contains test/a-1.0' "$installed" 'test/a-1.0'
}

testGetSearchLocal() {
luet install -y --config $tmpdir/luet.yaml test/a@1.0
assertTrue 'package installed A' "[ -e '$tmpdir/testrootfs/testaa' ]"
installst=$?
assertEquals 'install test successfully' "$installst" "0"

installed=$(luet --config $tmpdir/luet.yaml database get --files test/a@1.0)
searchst=$?
assertEquals 'search exists successfully' "$searchst" "0"
assertContains 'contains file' "$installed" 'testaa'

installed=$(luet --config $tmpdir/luet.yaml database get test/a@1.0)
searchst=$?
assertEquals 'search exists successfully' "$searchst" "0"
assertNotContains 'contains file' "$installed" 'testaa'

installed=$(luet --config $tmpdir/luet.yaml search --installed --files testaa)
searchst=$?
assertEquals 'search exists successfully' "$searchst" "0"

assertContains 'contains test/a-1.1' "$installed" 'test/a-1.0'

installed=$(luet --config $tmpdir/luet.yaml search --installed --files foo)
searchst=$?
assertEquals 'search exists successfully' "$searchst" "0"

assertNotContains 'contains test/a-1.1' "$installed" 'test/a-1.0'
}

# Load shUnit2.
. "$ROOT_DIR/tests/integration/shunit2"/shunit2
@@ -85,26 +85,11 @@ EOF
}

testInstall() {
luet install -y --config $tmpdir/luet.yaml test/b@1.0
luet install -y --config $tmpdir/luet.yaml test/b@1.0 test/a@1.0 test/c@1.0
installst=$?
assertEquals 'install test successfully' "$installst" "0"
assertTrue 'package installed B' "[ -e '$tmpdir/testrootfs/test5' ]"

luet install -y --config $tmpdir/luet.yaml test/a@1.0
assertTrue 'package installed A' "[ -e '$tmpdir/testrootfs/testaa' ]"
installst=$?
assertEquals 'install test successfully' "$installst" "0"

luet install -y --config $tmpdir/luet.yaml test/a@1.1
assertTrue 'package installed A' "[ -e '$tmpdir/testrootfs/testaa' ]"
installst=$?
assertEquals 'install test successfully' "$installst" "0"
assertTrue 'package keeps old A' "[ -e '$tmpdir/testrootfs/testaa' ]"
assertTrue 'package new A was not installed' "[ ! -e '$tmpdir/testrootfs/testlatest' ]"

luet install -y --config $tmpdir/luet.yaml test/c@1.0
installst=$?
assertEquals 'install test successfully' "$installst" "0"
assertTrue 'package installed C' "[ -e '$tmpdir/testrootfs/c' ]"
}
Some files were not shown because too many files have changed in this diff.